[med-svn] [python-mne] 01/13: Imported Upstream version 0.10+dfsg

Yaroslav Halchenko debian at onerussian.com
Wed Nov 25 16:20:26 UTC 2015


This is an automated email from the git hooks/post-receive script.

yoh pushed a commit to branch master
in repository python-mne.

commit 9b5da07eeeb7305161b49bcacae21a93f5e97b4b
Author: jaeilepp <jaeilepp at student.jyu.fi>
Date:   Mon Nov 9 05:50:24 2015 -0500

    Imported Upstream version 0.10+dfsg
---
 .gitignore                                         |   12 +-
 .mailmap                                           |   17 +-
 .travis.yml                                        |  158 +-
 MANIFEST.in                                        |   21 +-
 Makefile                                           |   62 +-
 README.rst                                         |   14 +-
 appveyor.yml                                       |   36 +
 bin/mne                                            |    1 +
 doc/Makefile                                       |   88 +-
 doc/{source => }/_static/branch_dropdown.png       |  Bin
 doc/{source => }/_static/favicon.ico               |  Bin
 doc/_static/flow_diagram.svg                       |  204 +
 doc/{source => }/_static/forking_button.png        |  Bin
 doc/_static/institutions.png                       |  Bin 0 -> 25284 bytes
 doc/{source/_images => _static}/mne_helmet.png     |  Bin
 doc/_static/mne_logo.png                           |  Bin 0 -> 39669 bytes
 doc/_static/mne_logo_small.png                     |  Bin 0 -> 868 bytes
 doc/{source => }/_static/pull_button.png           |  Bin
 doc/_static/style.css                              |   46 +
 doc/{source => }/_templates/class.rst              |    0
 doc/{source => }/_templates/function.rst           |    0
 doc/{source => }/_templates/layout.html            |   28 +-
 doc/advanced_setup.rst                             |  130 +
 doc/build_doc                                      |   16 -
 doc/{source => }/cite.rst                          |   11 +-
 doc/{source => }/conf.py                           |  177 +-
 doc/{source => }/contributing.rst                  |  210 +-
 doc/{source => }/customizing_git.rst               |    4 +
 doc/faq.rst                                        |   75 +
 doc/getting_started.rst                            |  321 +
 doc/{source => }/git_links.inc                     |   51 +-
 doc/index.rst                                      |  137 +
 doc/{source => }/known_projects.inc                |    9 +-
 doc/{source => }/links.inc                         |    0
 .../AppA.rst => manual/appendix/bem_model.rst}     |   73 +-
 .../AppEULA.rst => manual/appendix/c_EULA.rst}     |   10 +-
 doc/manual/appendix/c_misc.rst                     |   99 +
 .../appendix/c_release_notes.rst}                  |  205 +-
 doc/manual/appendix/martinos.rst                   |  117 +
 doc/manual/c_reference.rst                         | 6442 ++++++++++++++++++++
 doc/manual/cookbook.rst                            |  420 ++
 .../manual/sampledata.rst => manual/datasets.rst}  |   89 +-
 doc/{source/manual => manual/gui}/analyze.rst      |   99 +-
 doc/{source/manual => manual/gui}/browse.rst       |  457 +-
 .../gui}/mne_analyze/MNE_preferences.png           |  Bin
 .../gui}/mne_analyze/adjust_alignment.png          |  Bin
 .../gui}/mne_analyze/adjust_lights.png             |  Bin
 .../gui}/mne_analyze/adjust_menu.png               |  Bin
 .../gui}/mne_analyze/cont_hpi_data.png             |  Bin
 .../gui}/mne_analyze/dipole_list.png               |  Bin
 .../gui}/mne_analyze/dipole_parameters.png         |  Bin
 .../gui}/mne_analyze/dipoles_menu.png              |  Bin
 .../gui}/mne_analyze/epoch_selector.png            |  Bin
 .../gui}/mne_analyze/field_mapping_pref.png        |  Bin
 .../gui}/mne_analyze/file_menu.png                 |  Bin
 .../gui}/mne_analyze/hardcopy_controls.png         |  Bin
 .../gui}/mne_analyze/help_menu.png                 |  Bin
 .../gui}/mne_analyze/image_dialog.png              |  Bin
 .../gui}/mne_analyze/label_list.png                |  Bin
 .../gui}/mne_analyze/labels_menu.png               |  Bin
 .../gui}/mne_analyze/main_window.png               |  Bin
 .../gui}/mne_analyze/movie_dialog.png              |  Bin
 .../gui}/mne_analyze/mri_viewer.png                |  Bin
 .../gui}/mne_analyze/open_dialog.png               |  Bin
 .../gui}/mne_analyze/overlay_management.png        |  Bin
 .../gui}/mne_analyze/patch_selection_dialog.png    |  Bin
 .../gui}/mne_analyze/save_label_timecourse.png     |  Bin
 .../gui}/mne_analyze/scales_dialog.png             |  Bin
 .../gui}/mne_analyze/surface_controls.png          |  Bin
 .../gui}/mne_analyze/surface_selection_dialog.png  |  Bin
 .../gui}/mne_analyze/timecourse_manager.png        |  Bin
 .../gui}/mne_analyze/view_menu.png                 |  Bin
 .../manual => manual/gui}/mne_analyze/viewer.png   |  Bin
 .../gui}/mne_analyze/viewer_options.png            |  Bin
 .../gui}/mne_analyze/visualize_hpi.png             |  Bin
 .../gui}/mne_analyze/windows_menu.png              |  Bin
 .../gui}/mne_browse_raw/adjust_menu.png            |  Bin
 .../gui}/mne_browse_raw/adust_menu.png             |  Bin
 .../gui}/mne_browse_raw/average_pref.png           |  Bin
 .../gui}/mne_browse_raw/channel_selection.png      |  Bin
 .../gui}/mne_browse_raw/file_menu.png              |  Bin
 .../gui}/mne_browse_raw/filter_dialog.png          |  Bin
 .../gui}/mne_browse_raw/help_menu.png              |  Bin
 .../manual => manual/gui}/mne_browse_raw/main.png  |  Bin
 .../gui}/mne_browse_raw/manage_averages_dialog.png |  Bin
 .../gui}/mne_browse_raw/new_selection.png          |  Bin
 .../gui}/mne_browse_raw/new_ssp.png                |  Bin
 .../gui}/mne_browse_raw/open_dialog copy.png       |  Bin
 .../gui}/mne_browse_raw/open_dialog.png            |  Bin
 .../gui}/mne_browse_raw/process_menu.png           |  Bin
 .../gui}/mne_browse_raw/process_menu2.png          |  Bin
 .../gui}/mne_browse_raw/scales_dialog.png          |  Bin
 .../gui}/mne_browse_raw/scales_dialog2.png         |  Bin
 .../gui}/mne_browse_raw/toolbar.png                |  Bin
 .../gui}/mne_browse_raw/windows_menu-0.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-1.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-10.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-11.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-12.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-13.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-14.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-15.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-16.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-17.png        |  Bin
 .../gui}/mne_browse_raw/windows_menu-2.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-3.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-4.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-5.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-6.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-7.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-8.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu-9.png         |  Bin
 .../gui}/mne_browse_raw/windows_menu.png           |  Bin
 doc/manual/index.rst                               |  142 +
 doc/manual/io.rst                                  |  343 ++
 doc/{source => }/manual/matlab.rst                 |   20 +-
 doc/{source => }/manual/pics/CoordinateSystems.png |  Bin
 doc/{source => }/manual/pics/Digitizer-example.png |  Bin
 doc/{source => }/manual/pics/Flowchart.png         |  Bin
 doc/{source => }/manual/pics/HeadCS.png            |  Bin
 doc/manual/pics/ICA_primer.png                     |  Bin 0 -> 122544 bytes
 doc/{source => }/manual/pics/cover.png             |  Bin
 doc/{source => }/manual/pics/flat.png              |  Bin
 doc/{source => }/manual/pics/morphed.png           |  Bin
 doc/{source => }/manual/pics/neuromag.png          |  Bin
 doc/{source => }/manual/pics/orig.png              |  Bin
 doc/{source => }/manual/pics/proj-off-on.png       |  Bin
 doc/{source => }/manual/pics/title_page.png        |  Bin
 doc/manual/preprocessing/bads.rst                  |    3 +
 doc/manual/preprocessing/filter.rst                |    3 +
 doc/manual/preprocessing/ica.rst                   |  118 +
 doc/manual/preprocessing/overview.rst              |    3 +
 doc/manual/preprocessing/ssp.rst                   |  128 +
 doc/manual/source_localization/covariance.rst      |    5 +
 .../source_localization}/forward.rst               |  504 +-
 doc/manual/source_localization/inverse.rst         |  520 ++
 doc/manual/source_localization/morph.rst           |  141 +
 doc/manual/statistics.rst                          |  100 +
 doc/manual/time_frequency.rst                      |   34 +
 doc/manual/visualization.rst                       |    3 +
 doc/mne_cpp.rst                                    |   18 +
 doc/{source => }/python_reference.rst              |  252 +-
 doc/{source/manual/reading.rst => references.rst}  |    0
 doc/source/_images/plot_time_frequency.png         |  Bin 151383 -> 0 bytes
 doc/source/_static/default.css                     |  515 --
 doc/source/_static/institutions.png                |  Bin 71392 -> 0 bytes
 doc/source/_static/mne_logo.png                    |  Bin 77583 -> 0 bytes
 doc/source/_static/navy.css                        |  515 --
 doc/source/_templates/sidebar.html                 |    5 -
 doc/source/getting_started.rst                     |  205 -
 doc/source/index.rst                               |   56 -
 doc/source/manual.rst                              |   28 -
 doc/source/manual/AppB.rst                         |  294 -
 doc/source/manual/AppInstall.rst                   |  174 -
 doc/source/manual/convert.rst                      | 2312 -------
 doc/source/manual/cookbook.rst                     | 1066 ----
 doc/source/manual/intro.rst                        |   45 -
 doc/source/manual/list.rst                         |  439 --
 doc/source/manual/mne.rst                          | 1323 ----
 doc/source/manual/morph.rst                        |  409 --
 doc/source/manual/pics/Averaging-flowchart.png     |  Bin 16254 -> 0 bytes
 doc/source/manual/utilities.rst                    | 1402 -----
 doc/source/mne-python.rst                          |   27 -
 doc/source/python_tutorial.rst                     |  396 --
 doc/source/this_project.inc                        |    5 -
 doc/sphinxext/commands.py                          |   80 +
 doc/sphinxext/docscrape.py                         |  497 --
 doc/sphinxext/docscrape_sphinx.py                  |  137 -
 doc/sphinxext/flow_diagram.py                      |  166 +
 doc/sphinxext/gen_rst.py                           |  946 ---
 doc/sphinxext/numpy_ext/docscrape.py               |  103 +-
 doc/sphinxext/numpy_ext/docscrape_sphinx.py        |   46 +-
 doc/sphinxext/numpy_ext/numpydoc.py                |   79 +-
 doc/sphinxext/numpy_ext_old/docscrape.py           |  490 --
 doc/sphinxext/numpy_ext_old/docscrape_sphinx.py    |  133 -
 doc/sphinxext/numpy_ext_old/numpydoc.py            |  111 -
 doc/this_project.inc                               |    4 +
 doc/tutorials.rst                                  |   84 +
 .../_images/plot_read_and_write_raw_data.png       |  Bin
 .../_images/plot_read_epochs.png                   |  Bin
 doc/tutorials/_images/plot_time_frequency.png      |  Bin 0 -> 170691 bytes
 .../command_line.rst}                              |    0
 .../report.rst}                                    |   59 +-
 doc/upload_html.sh                                 |    2 +-
 doc/utils/extract_config_doc.py                    |   73 -
 doc/utils/lut2sphinxtbl.py                         |   65 -
 doc/utils/make_clean_config.py                     |   30 -
 doc/{source => }/whats_new.rst                     |  540 +-
 examples/README.txt                                |   11 +-
 .../connectivity/plot_cwt_sensor_connectivity.py   |   20 +-
 .../plot_mne_inverse_coherence_epochs.py           |   24 +-
 .../plot_mne_inverse_connectivity_spectrum.py      |   12 +-
 .../plot_mne_inverse_label_connectivity.py         |   16 +-
 .../connectivity/plot_mne_inverse_psi_visual.py    |   15 +-
 examples/connectivity/plot_sensor_connectivity.py  |   27 +-
 examples/datasets/plot_brainstorm_data.py          |   74 +
 examples/datasets/plot_megsim_data.py              |    7 +-
 examples/datasets/plot_megsim_data_single_trial.py |    5 +-
 examples/datasets/plot_spm_faces_dataset.py        |   34 +-
 examples/decoding/plot_decoding_csp_eeg.py         |   31 +-
 examples/decoding/plot_decoding_csp_space.py       |   15 +-
 examples/decoding/plot_decoding_sensors.py         |   63 +-
 .../plot_decoding_spatio_temporal_source.py        |   20 +-
 .../decoding/plot_decoding_time_generalization.py  |   78 +-
 ...plot_decoding_time_generalization_conditions.py |   75 +
 examples/decoding/plot_decoding_xdawn_eeg.py       |  101 +
 examples/decoding/plot_ems_filtering.py            |    8 +-
 examples/decoding/plot_linear_model_patterns.py    |   84 +
 examples/export/README.txt                         |    5 -
 examples/export/plot_epochs_to_nitime.py           |   65 -
 examples/export/plot_evoked_to_nitime.py           |   34 -
 examples/export/plot_raw_to_nitime.py              |   83 -
 examples/forward/README.txt                        |    6 +
 examples/{ => forward}/plot_bem_contour_mri.py     |    2 +
 .../{ => forward}/plot_coregistration_transform.py |    4 +-
 .../{ => forward}/plot_decimate_head_surface.py    |   25 +-
 .../forward/plot_left_cerebellum_volume_source.py  |   96 +
 examples/{ => forward}/plot_make_forward.py        |   24 +-
 examples/{ => forward}/plot_read_bem_surfaces.py   |   13 +-
 .../plot_read_forward.py}                          |   58 +-
 examples/forward/plot_source_space_morphing.py     |   68 +
 .../plot_compute_mne_inverse_epochs_in_label.py    |    8 +-
 .../plot_compute_mne_inverse_raw_in_label.py       |    5 +-
 .../inverse/plot_compute_mne_inverse_volume.py     |   31 +-
 examples/inverse/plot_covariance_whitening_dspm.py |  160 +
 examples/inverse/plot_dics_beamformer.py           |   13 +-
 examples/inverse/plot_dics_source_power.py         |   16 +-
 examples/inverse/plot_dipole_fit.py                |   43 +
 examples/inverse/plot_dipole_fit_result.py         |   82 -
 examples/inverse/plot_gamma_map_inverse.py         |   13 +-
 examples/inverse/plot_label_activation_from_stc.py |  122 +-
 examples/inverse/plot_label_from_stc.py            |   12 +-
 examples/inverse/plot_label_source_activations.py  |    5 +-
 examples/inverse/plot_lcmv_beamformer.py           |   24 +-
 examples/inverse/plot_lcmv_beamformer_volume.py    |   35 +-
 examples/inverse/plot_make_inverse_operator.py     |   12 +-
 ...m_L21_inverse.py => plot_mixed_norm_inverse.py} |   34 +-
 examples/inverse/plot_mne_crosstalk_function.py    |   42 +-
 examples/inverse/plot_mne_point_spread_function.py |   47 +-
 examples/inverse/plot_morph_data.py                |   15 +-
 examples/inverse/plot_rap_music.py                 |   57 +
 examples/inverse/plot_read_inverse.py              |   28 +-
 examples/inverse/plot_read_source_space.py         |   28 +-
 examples/inverse/plot_read_stc.py                  |    5 +-
 examples/inverse/plot_snr_estimate.py              |   29 +
 examples/inverse/plot_tf_dics.py                   |    5 +-
 examples/inverse/plot_tf_lcmv.py                   |   16 +-
 .../plot_time_frequency_mixed_norm_inverse.py      |   30 +-
 examples/io/README.txt                             |    5 +
 examples/io/plot_objects_from_arrays.py            |  121 +
 examples/{ => io}/plot_read_and_write_raw_data.py  |    5 +-
 examples/{ => io}/plot_read_epochs.py              |    6 +-
 examples/{ => io}/plot_read_evoked.py              |    4 +-
 .../{ => io}/plot_read_noise_covariance_matrix.py  |   20 +-
 examples/{ => io}/read_events.py                   |    4 +-
 examples/{inverse => }/plot_compute_mne_inverse.py |   10 +-
 examples/plot_evoked_whitening.py                  |   46 -
 examples/plot_extract_events_from_raw.py           |    6 +-
 examples/plot_from_raw_to_epochs_to_evoked.py      |   55 +-
 .../plot_from_raw_to_multiple_epochs_to_evoked.py  |   71 -
 examples/plot_read_forward.py                      |   46 -
 examples/preprocessing/plot_corrmap_detection.py   |   76 +
 .../plot_define_target_events.py                   |    8 +-
 .../preprocessing/plot_eog_artifact_histogram.py   |    7 +-
 .../plot_estimate_covariance_matrix_baseline.py    |    4 +-
 .../plot_estimate_covariance_matrix_raw.py         |    6 +-
 examples/preprocessing/plot_find_ecg_artifacts.py  |    5 +-
 examples/preprocessing/plot_find_eog_artifacts.py  |    7 +-
 examples/preprocessing/plot_ica_from_epochs.py     |   75 -
 .../preprocessing/plot_interpolate_bad_channels.py |   43 +
 examples/preprocessing/plot_rereference_eeg.py     |   65 +
 examples/preprocessing/plot_resample.py            |   85 +
 examples/preprocessing/plot_run_ica.py             |   47 +
 examples/{ => preprocessing}/plot_shift_evoked.py  |    6 +-
 examples/preprocessing/plot_virtual_evoked.py      |   39 +
 examples/preprocessing/plot_xdawn_denoising.py     |   80 +
 examples/realtime/ftclient_rt_average.py           |   17 +-
 examples/realtime/ftclient_rt_compute_psd.py       |   74 +
 examples/realtime/plot_compute_rt_average.py       |   13 +-
 examples/realtime/plot_compute_rt_decoder.py       |   48 +-
 examples/realtime/rt_feedback_client.py            |    5 +-
 examples/realtime/rt_feedback_server.py            |   21 +-
 examples/simulation/README.txt                     |    5 +
 .../{ => simulation}/plot_simulate_evoked_data.py  |   58 +-
 examples/simulation/plot_simulate_raw_data.py      |   79 +
 examples/stats/plot_cluster_stats_evoked.py        |   11 +-
 examples/stats/plot_fdr_stats_evoked.py            |    8 +-
 examples/stats/plot_linear_regression_raw.py       |   67 +
 examples/stats/plot_sensor_permutation_test.py     |   11 +-
 examples/stats/plot_sensor_regression.py           |   15 +-
 .../plot_compute_raw_data_spectrum.py              |   20 +-
 .../plot_compute_source_psd_epochs.py              |    7 +-
 examples/time_frequency/plot_epochs_spectra.py     |   45 +
 .../time_frequency/plot_single_trial_spectra.py    |   96 -
 .../plot_source_label_time_frequency.py            |   19 +-
 .../time_frequency/plot_source_power_spectrum.py   |    7 +-
 .../plot_source_space_time_frequency.py            |    7 +-
 examples/time_frequency/plot_stockwell.py          |   50 +
 examples/time_frequency/plot_temporal_whitening.py |   16 +-
 .../plot_time_frequency_multitaper_sensors.py      |   55 +
 .../time_frequency/plot_time_frequency_sensors.py  |   13 +-
 .../plot_time_frequency_simulated.py               |  112 +
 examples/visualization/README.txt                  |    5 +
 examples/visualization/make_report.py              |   37 +
 .../plot_channel_epochs_image.py                   |   13 +-
 examples/visualization/plot_clickable_image.py     |   66 +
 .../{ => visualization}/plot_evoked_delayed_ssp.py |    5 +-
 examples/visualization/plot_evoked_erf_erp.py      |   51 +
 .../{ => visualization}/plot_evoked_topomap.py     |    9 +-
 .../plot_evoked_topomap_delayed_ssp.py             |    7 +-
 examples/visualization/plot_evoked_whitening.py    |   80 +
 .../{ => visualization}/plot_meg_eeg_fields_3d.py  |   19 +-
 .../plot_ssp_projs_sensitivity_map.py              |   10 +-
 .../{ => visualization}/plot_ssp_projs_topomaps.py |   12 +-
 .../plot_topo_channel_epochs_image.py              |    5 +-
 .../plot_topo_compare_conditions.py                |   19 +-
 .../{ => visualization}/plot_topo_customized.py    |    8 +-
 examples/{ => visualization}/plot_topography.py    |    6 +-
 logo/generate_mne_logos.py                         |  156 +
 make/install_python.ps1                            |   93 +
 mne/__init__.py                                    |   71 +-
 mne/_hdf5.py                                       |  167 -
 mne/baseline.py                                    |   14 +-
 mne/beamformer/__init__.py                         |    1 +
 mne/beamformer/_dics.py                            |   73 +-
 mne/beamformer/_lcmv.py                            |  176 +-
 mne/beamformer/_rap_music.py                       |  274 +
 mne/beamformer/tests/test_dics.py                  |   58 +-
 mne/beamformer/tests/test_lcmv.py                  |   92 +-
 mne/beamformer/tests/test_rap_music.py             |  152 +
 mne/bem.py                                         | 1660 +++++
 mne/channels.py                                    |  348 --
 mne/channels/__init__.py                           |   11 +
 mne/channels/channels.py                           |  783 +++
 mne/{ => channels/data}/layouts/CTF-275.lout       |    0
 mne/{ => channels/data}/layouts/CTF151.lay         |    0
 mne/{ => channels/data}/layouts/CTF275.lay         |    0
 mne/{ => channels/data}/layouts/EEG1005.lay        |    0
 mne/{ => channels/data}/layouts/EGI256.lout        |    0
 mne/{ => channels/data}/layouts/KIT-157.lout       |    2 +-
 mne/channels/data/layouts/KIT-AD.lout              |  209 +
 .../data}/layouts/Vectorview-all.lout              |    0
 .../data}/layouts/Vectorview-grad.lout             |    0
 .../data}/layouts/Vectorview-mag.lout              |    0
 mne/channels/data/layouts/biosemi.lay              |   64 +
 mne/{ => channels/data}/layouts/magnesWH3600.lout  |    0
 .../data/montages/10-5-System_Mastoids_EGI129.csd  |  467 ++
 mne/channels/data/montages/EGI_256.csd             |  258 +
 mne/channels/data/montages/GSN-HydroCel-128.sfp    |  131 +
 mne/channels/data/montages/GSN-HydroCel-129.sfp    |  132 +
 mne/channels/data/montages/GSN-HydroCel-256.sfp    |  259 +
 mne/channels/data/montages/GSN-HydroCel-257.sfp    |  260 +
 mne/channels/data/montages/GSN-HydroCel-32.sfp     |   36 +
 mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp |   67 +
 mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp |   68 +
 mne/channels/data/montages/biosemi128.txt          |  132 +
 mne/channels/data/montages/biosemi16.txt           |   20 +
 mne/channels/data/montages/biosemi160.txt          |  164 +
 mne/channels/data/montages/biosemi256.txt          |  260 +
 mne/channels/data/montages/biosemi32.txt           |   36 +
 mne/channels/data/montages/biosemi64.txt           |   68 +
 mne/channels/data/montages/easycap-M1.txt          |   75 +
 mne/channels/data/montages/easycap-M10.txt         |   62 +
 mne/channels/data/montages/standard_1005.elc       |  698 +++
 mne/channels/data/montages/standard_1020.elc       |  200 +
 mne/channels/data/montages/standard_alphabetic.elc |  142 +
 mne/channels/data/montages/standard_postfixed.elc  |  212 +
 mne/channels/data/montages/standard_prefixed.elc   |  160 +
 mne/channels/data/montages/standard_primed.elc     |  212 +
 mne/channels/data/neighbors/KIT-157_neighb.mat     |  Bin 0 -> 4939 bytes
 mne/channels/data/neighbors/KIT-208_neighb.mat     |  Bin 0 -> 6636 bytes
 mne/channels/data/neighbors/__init__.py            |    6 +
 mne/channels/data/neighbors/biosemi16_neighb.mat   |  Bin 0 -> 511 bytes
 mne/channels/data/neighbors/biosemi32_neighb.mat   |  Bin 0 -> 942 bytes
 mne/channels/data/neighbors/biosemi64_neighb.mat   |  Bin 0 -> 1812 bytes
 mne/channels/data/neighbors/bti148_neighb.mat      |  Bin 0 -> 3920 bytes
 mne/channels/data/neighbors/bti248_neighb.mat      |  Bin 0 -> 6577 bytes
 mne/channels/data/neighbors/bti248grad_neighb.mat  |  Bin 0 -> 8337 bytes
 mne/channels/data/neighbors/ctf151_neighb.mat      |  Bin 0 -> 4380 bytes
 mne/channels/data/neighbors/ctf275_neighb.mat      |  Bin 0 -> 7831 bytes
 mne/channels/data/neighbors/ctf64_neighb.mat       |  Bin 0 -> 2397 bytes
 .../data/neighbors/easycap128ch-avg_neighb.mat     |  Bin 0 -> 3870 bytes
 .../data/neighbors/easycap32ch-avg_neighb.mat      |  Bin 0 -> 1127 bytes
 .../data/neighbors/easycap64ch-avg_neighb.mat      |  Bin 0 -> 1861 bytes
 mne/channels/data/neighbors/easycapM11_neighb.mat  |  Bin 0 -> 1792 bytes
 mne/channels/data/neighbors/easycapM14_neighb.mat  |  Bin 0 -> 3529 bytes
 mne/channels/data/neighbors/easycapM15_neighb.mat  |  Bin 0 -> 3906 bytes
 mne/channels/data/neighbors/easycapM1_neighb.mat   |  Bin 0 -> 2145 bytes
 mne/channels/data/neighbors/neuromag122_neighb.mat |  Bin 0 -> 3400 bytes
 .../data/neighbors/neuromag306mag_neighb.mat       |  Bin 0 -> 2753 bytes
 .../data/neighbors/neuromag306planar_neighb.mat    |  Bin 0 -> 5580 bytes
 mne/channels/interpolation.py                      |  207 +
 mne/channels/layout.py                             |  825 +++
 mne/channels/montage.py                            |  533 ++
 .../channels/tests}/__init__.py                    |    0
 mne/channels/tests/test_channels.py                |  152 +
 mne/channels/tests/test_interpolation.py           |  120 +
 mne/channels/tests/test_layout.py                  |  380 ++
 mne/channels/tests/test_montage.py                 |  209 +
 mne/chpi.py                                        |  440 ++
 mne/commands/__init__.py                           |    1 +
 mne/commands/mne_browse_raw.py                     |   43 +-
 mne/commands/mne_bti2fiff.py                       |   35 +-
 mne/commands/mne_clean_eog_ecg.py                  |  100 +-
 mne/commands/mne_compare_fiff.py                   |   27 +
 mne/commands/mne_compute_proj_ecg.py               |   18 +-
 mne/commands/mne_compute_proj_eog.py               |   92 +-
 mne/commands/mne_coreg.py                          |    9 +-
 mne/commands/mne_flash_bem.py                      |   90 +
 mne/commands/mne_flash_bem_model.py                |   37 +-
 mne/commands/mne_freeview_bem_surfaces.py          |   92 +
 mne/commands/mne_kit2fiff.py                       |   10 +-
 mne/commands/mne_make_scalp_surfaces.py            |  135 +-
 mne/commands/mne_maxfilter.py                      |  110 +-
 mne/commands/mne_report.py                         |   49 +-
 mne/commands/mne_surf2bem.py                       |   14 +-
 mne/commands/mne_watershed_bem.py                  |   62 +
 mne/{layouts => commands}/tests/__init__.py        |    0
 mne/commands/tests/test_commands.py                |  244 +
 mne/commands/utils.py                              |   27 +-
 mne/connectivity/effective.py                      |   13 +-
 mne/connectivity/spectral.py                       |  219 +-
 mne/connectivity/tests/test_effective.py           |   12 +-
 mne/connectivity/tests/test_spectral.py            |  131 +-
 mne/coreg.py                                       |  232 +-
 mne/cov.py                                         | 1359 ++++-
 mne/cuda.py                                        |  302 +-
 mne/data/FreeSurferColorLUT.txt                    | 1397 +++++
 mne/data/coil_def.dat                              |   47 +-
 mne/data/coil_def_Elekta.dat                       |   70 +
 mne/data/image/custom_layout.lout                  |   24 +
 mne/data/image/mni_brain.gif                       |  Bin 0 -> 12051 bytes
 mne/datasets/__init__.py                           |    3 +
 mne/datasets/_fake/__init__.py                     |    4 +
 mne/datasets/_fake/_fake.py                        |   25 +
 mne/datasets/brainstorm/__init__.py                |    4 +
 mne/datasets/brainstorm/bst_auditory.py            |   60 +
 mne/datasets/brainstorm/bst_raw.py                 |   59 +
 mne/datasets/brainstorm/bst_resting.py             |   51 +
 mne/datasets/eegbci/eegbci.py                      |   72 +-
 mne/datasets/megsim/megsim.py                      |   75 +-
 mne/datasets/megsim/urls.py                        |   36 +-
 mne/datasets/sample/__init__.py                    |    3 +-
 mne/datasets/sample/sample.py                      |   31 +-
 mne/datasets/somato/__init__.py                    |    2 +-
 mne/datasets/somato/somato.py                      |   32 +-
 mne/datasets/spm_face/__init__.py                  |    2 +-
 mne/datasets/spm_face/spm_data.py                  |   30 +-
 mne/datasets/testing/__init__.py                   |    4 +
 mne/datasets/testing/_testing.py                   |   47 +
 .../datasets/tests}/__init__.py                    |    0
 mne/datasets/tests/test_datasets.py                |   46 +
 mne/datasets/utils.py                              |  294 +-
 mne/decoding/__init__.py                           |    7 +-
 mne/decoding/base.py                               |  622 ++
 mne/decoding/csp.py                                |  399 +-
 mne/decoding/ems.py                                |    2 +-
 mne/decoding/tests/test_csp.py                     |   31 +-
 mne/decoding/tests/test_ems.py                     |    6 +-
 mne/decoding/tests/test_time_gen.py                |  303 +-
 .../{test_classifier.py => test_transformer.py}    |   63 +-
 mne/decoding/time_gen.py                           | 1360 ++++-
 mne/decoding/{classifier.py => transformer.py}     |  201 +-
 mne/defaults.py                                    |   54 +
 mne/dipole.py                                      |  706 ++-
 mne/epochs.py                                      | 2509 ++++----
 mne/event.py                                       |  104 +-
 mne/evoked.py                                      |  936 +--
 mne/externals/FieldTrip.py                         |    4 +-
 mne/externals/__init__.py                          |    3 +-
 mne/externals/h5io/__init__.py                     |    6 +
 mne/externals/h5io/_h5io.py                        |  297 +
 mne/fiff/__init__.py                               |   79 -
 mne/filter.py                                      |  518 +-
 mne/fixes.py                                       |  374 +-
 mne/forward/__init__.py                            |   13 +-
 mne/forward/_compute_forward.py                    |  878 ++-
 mne/forward/_field_interpolation.py                |  195 +-
 mne/forward/_lead_dots.py                          |  270 +-
 mne/forward/_make_forward.py                       |  682 ++-
 mne/forward/forward.py                             |  503 +-
 mne/forward/tests/test_field_interpolation.py      |  126 +-
 mne/forward/tests/test_forward.py                  |  157 +-
 mne/forward/tests/test_make_forward.py             |  267 +-
 mne/gui/__init__.py                                |   16 +-
 mne/gui/_coreg_gui.py                              |   93 +-
 mne/gui/_fiducials_gui.py                          |   36 +-
 mne/gui/_file_traits.py                            |  151 +-
 mne/gui/_kit2fiff_gui.py                           |  184 +-
 mne/gui/_marker_gui.py                             |  113 +-
 mne/gui/_viewer.py                                 |   52 +-
 mne/gui/tests/test_coreg_gui.py                    |   52 +-
 mne/gui/tests/test_fiducials_gui.py                |   25 +-
 mne/gui/tests/test_file_traits.py                  |   61 +-
 mne/gui/tests/test_kit2fiff_gui.py                 |   30 +-
 mne/gui/tests/test_marker_gui.py                   |   21 +-
 mne/inverse_sparse/_gamma_map.py                   |   50 +-
 mne/inverse_sparse/mxne_debiasing.py               |   12 +-
 mne/inverse_sparse/mxne_inverse.py                 |  317 +-
 mne/inverse_sparse/mxne_optim.py                   |  712 ++-
 mne/inverse_sparse/tests/test_gamma_map.py         |   37 +-
 mne/inverse_sparse/tests/test_mxne_inverse.py      |   83 +-
 mne/inverse_sparse/tests/test_mxne_optim.py        |  181 +-
 mne/io/__init__.py                                 |   64 +-
 mne/io/array/array.py                              |   31 +-
 mne/io/array/tests/test_array.py                   |   34 +-
 mne/io/base.py                                     | 1397 +++--
 mne/io/brainvision/__init__.py                     |    2 +-
 mne/io/brainvision/brainvision.py                  |  660 +-
 mne/io/brainvision/tests/data/test.hpts            |   46 +
 mne/io/brainvision/tests/data/test.vmrk            |    5 +-
 mne/io/brainvision/tests/data/test_highpass.vhdr   |  142 +
 mne/io/brainvision/tests/test_brainvision.py       |  136 +-
 mne/io/bti/bti.py                                  |  892 +--
 mne/io/bti/constants.py                            |   12 +-
 mne/io/bti/read.py                                 |   97 +-
 mne/io/bti/tests/test_bti.py                       |  228 +-
 mne/io/bti/transforms.py                           |  102 -
 mne/io/constants.py                                |   64 +-
 mne/io/ctf.py                                      |   20 +-
 mne/io/diff.py                                     |    2 +-
 mne/io/edf/__init__.py                             |    2 +-
 mne/io/edf/edf.py                                  |  783 +--
 mne/io/edf/tests/data/biosemi.hpts                 |    8 +-
 mne/io/edf/tests/data/test_edf_eeglab.mat          |  Bin 799941 -> 524875 bytes
 mne/io/edf/tests/data/test_edf_stim_channel.edf    |  Bin 0 -> 68056 bytes
 mne/io/edf/tests/data/test_edf_stim_channel.txt    |  717 +++
 mne/io/edf/tests/data/test_uneven_samp.edf         |  Bin 0 -> 25584 bytes
 mne/io/edf/tests/data/test_uneven_samp.mat         |  Bin 0 -> 40487 bytes
 mne/io/edf/tests/test_edf.py                       |  187 +-
 mne/io/egi/egi.py                                  |  222 +-
 mne/io/egi/tests/test_egi.py                       |   24 +-
 mne/io/fiff/__init__.py                            |    3 +-
 mne/io/fiff/raw.py                                 |  465 +-
 mne/io/fiff/tests/test_raw.py                      |  504 +-
 mne/io/kit/__init__.py                             |   10 +-
 mne/io/kit/constants.py                            |   11 +-
 mne/io/kit/coreg.py                                |   94 +-
 mne/io/kit/kit.py                                  |  959 +--
 mne/io/kit/tests/data/test-epoch.raw               |  Bin 0 -> 138388 bytes
 mne/io/kit/tests/data/test-eve.txt                 |    2 +
 mne/io/kit/tests/test_coreg.py                     |   54 +-
 mne/io/kit/tests/test_kit.py                       |   69 +-
 mne/io/meas_info.py                                |  743 ++-
 mne/io/open.py                                     |   62 +-
 mne/io/pick.py                                     |  286 +-
 mne/io/proc_history.py                             |  290 +
 mne/io/proj.py                                     |  173 +-
 mne/io/reference.py                                |  387 ++
 mne/io/tag.py                                      |  127 +-
 mne/io/tests/data/test_ica.lout                    |    6 +-
 mne/io/tests/test_apply_function.py                |   58 +
 mne/io/tests/test_compensator.py                   |    5 +-
 mne/io/tests/test_meas_info.py                     |  153 +-
 mne/io/tests/test_pick.py                          |  177 +-
 mne/io/tests/test_proc_history.py                  |   47 +
 mne/io/tests/test_raw.py                           |   51 +
 mne/io/tests/test_reference.py                     |  307 +
 mne/io/tree.py                                     |   24 +-
 mne/io/write.py                                    |   85 +-
 mne/label.py                                       |  505 +-
 mne/layouts/__init__.py                            |    2 -
 mne/layouts/layout.py                              |  563 --
 mne/layouts/tests/test_layout.py                   |  218 -
 mne/minimum_norm/__init__.py                       |    3 +-
 mne/minimum_norm/inverse.py                        |  710 ++-
 mne/minimum_norm/psf_ctf.py                        |   93 +-
 mne/minimum_norm/tests/test_inverse.py             |  366 +-
 mne/minimum_norm/tests/test_psf_ctf.py             |  110 +-
 mne/minimum_norm/tests/test_snr.py                 |   42 +
 mne/minimum_norm/tests/test_time_frequency.py      |   91 +-
 mne/minimum_norm/time_frequency.py                 |  370 +-
 mne/misc.py                                        |   16 +-
 mne/mixed_norm/__init__.py                         |    7 -
 mne/parallel.py                                    |   20 +-
 mne/preprocessing/__init__.py                      |    7 +-
 mne/preprocessing/bads.py                          |   16 +-
 mne/preprocessing/ctps_.py                         |    2 +-
 mne/preprocessing/ecg.py                           |   98 +-
 mne/preprocessing/eog.py                           |   57 +-
 mne/preprocessing/ica.py                           | 1086 ++--
 mne/preprocessing/infomax_.py                      |  107 +-
 mne/preprocessing/maxfilter.py                     |   80 +-
 mne/preprocessing/maxwell.py                       |  644 ++
 mne/preprocessing/peak_finder.py                   |    4 +-
 mne/preprocessing/ssp.py                           |   27 +-
 mne/preprocessing/stim.py                          |  143 +-
 .../eeglab_extended_infomax_results_eeg_data.mat   |  Bin 0 -> 402 bytes
 .../eeglab_extended_infomax_results_meg_data.mat   |  Bin 0 -> 403 bytes
 .../tests/data/eeglab_infomax_results_eeg_data.mat |  Bin 0 -> 403 bytes
 .../tests/data/eeglab_infomax_results_meg_data.mat |  Bin 0 -> 404 bytes
 mne/preprocessing/tests/test_eeglab_infomax.py     |  204 +
 mne/preprocessing/tests/test_ica.py                |  119 +-
 mne/preprocessing/tests/test_infomax.py            |  153 +-
 mne/preprocessing/tests/test_maxwell.py            |  256 +
 mne/preprocessing/tests/test_ssp.py                |   22 +-
 mne/preprocessing/tests/test_stim.py               |  104 +-
 mne/preprocessing/tests/test_xdawn.py              |  145 +
 mne/preprocessing/xdawn.py                         |  484 ++
 mne/proj.py                                        |   83 +-
 mne/realtime/client.py                             |   16 +-
 mne/realtime/epochs.py                             |  164 +-
 mne/realtime/fieldtrip_client.py                   |   71 +-
 mne/realtime/mockclient.py                         |   39 +-
 mne/realtime/stim_server_client.py                 |   21 +-
 mne/realtime/tests/test_fieldtrip_client.py        |   71 +-
 mne/realtime/tests/test_mockclient.py              |  100 +-
 mne/realtime/tests/test_stim_client_server.py      |   45 +-
 mne/report.py                                      | 1035 +++-
 mne/selection.py                                   |   15 +-
 mne/simulation/__init__.py                         |    9 +-
 mne/simulation/evoked.py                           |  155 +-
 mne/simulation/metrics.py                          |   68 +
 mne/simulation/raw.py                              |  569 ++
 mne/simulation/source.py                           |  135 +-
 mne/simulation/tests/test_evoked.py                |   57 +-
 mne/simulation/tests/test_metrics.py               |   52 +
 mne/simulation/tests/test_raw.py                   |  248 +
 mne/simulation/tests/test_source.py                |  130 +-
 mne/source_estimate.py                             |  738 ++-
 mne/source_space.py                                | 1259 +++-
 mne/stats/__init__.py                              |    5 +-
 mne/stats/cluster_level.py                         |  240 +-
 mne/stats/multi_comp.py                            |    4 +-
 mne/stats/parametric.py                            |  211 +-
 mne/stats/regression.py                            |  210 +-
 mne/stats/tests/test_cluster_level.py              |  222 +-
 mne/stats/tests/test_multi_comp.py                 |    5 +-
 mne/stats/tests/test_parametric.py                 |  122 +-
 mne/stats/tests/test_regression.py                 |   65 +-
 mne/surface.py                                     |  552 +-
 mne/tests/__init__.py                              |    3 -
 mne/tests/test_bem.py                              |  264 +
 mne/tests/test_channels.py                         |  109 -
 mne/tests/test_chpi.py                             |  168 +
 mne/tests/test_coreg.py                            |   68 +-
 mne/tests/test_cov.py                              |  286 +-
 mne/tests/test_defaults.py                         |   22 +
 mne/tests/test_dipole.py                           |  262 +-
 mne/tests/test_docstring_parameters.py             |  160 +
 mne/tests/test_epochs.py                           |  905 ++-
 mne/tests/test_event.py                            |   86 +-
 mne/tests/test_evoked.py                           |  163 +-
 mne/tests/test_filter.py                           |  241 +-
 mne/tests/test_fixes.py                            |   66 +-
 mne/tests/test_hdf5.py                             |   26 -
 mne/tests/test_import_nesting.py                   |   53 +
 mne/tests/test_label.py                            |  382 +-
 mne/tests/test_proj.py                             |   90 +-
 mne/tests/test_report.py                           |  271 +-
 mne/tests/test_source_estimate.py                  |  204 +-
 mne/tests/test_source_space.py                     |  476 +-
 mne/tests/test_surface.py                          |  105 +-
 mne/tests/test_transforms.py                       |  163 +-
 mne/tests/test_utils.py                            |  334 +-
 mne/time_frequency/__init__.py                     |    7 +-
 mne/time_frequency/_stockwell.py                   |  255 +
 mne/time_frequency/ar.py                           |   75 +-
 mne/time_frequency/csd.py                          |   14 +-
 mne/time_frequency/multitaper.py                   |   35 +-
 mne/time_frequency/psd.py                          |  142 +-
 mne/time_frequency/stft.py                         |   29 +-
 mne/time_frequency/tests/test_ar.py                |   21 +-
 mne/time_frequency/tests/test_csd.py               |   22 +-
 mne/time_frequency/tests/test_multitaper.py        |    4 +-
 mne/time_frequency/tests/test_psd.py               |   97 +-
 mne/time_frequency/tests/test_stft.py              |   22 +-
 mne/time_frequency/tests/test_stockwell.py         |   96 +
 mne/time_frequency/tests/test_tfr.py               |  235 +-
 mne/time_frequency/tfr.py                          |  820 ++-
 mne/transforms.py                                  |  436 +-
 mne/utils.py                                       | 1163 ++--
 mne/viz/_3d.py                                     |  512 +-
 mne/viz/__init__.py                                |   32 +-
 mne/viz/circle.py                                  |   32 +-
 mne/viz/decoding.py                                |  236 +
 mne/viz/epochs.py                                  | 1390 ++++-
 mne/viz/evoked.py                                  |  579 +-
 mne/viz/ica.py                                     |  489 +-
 mne/viz/misc.py                                    |  119 +-
 mne/viz/montage.py                                 |   58 +
 mne/viz/raw.py                                     |  770 +--
 mne/viz/tests/test_3d.py                           |  167 +-
 mne/viz/tests/test_circle.py                       |    4 +-
 mne/viz/tests/test_decoding.py                     |  124 +
 mne/viz/tests/test_epochs.py                       |  110 +-
 mne/viz/tests/test_evoked.py                       |   57 +-
 mne/viz/tests/test_ica.py                          |  120 +-
 mne/viz/tests/test_misc.py                         |   65 +-
 mne/viz/tests/test_montage.py                      |   30 +
 mne/viz/tests/test_raw.py                          |   90 +-
 mne/viz/tests/test_topo.py                         |   58 +-
 mne/viz/tests/test_topomap.py                      |  265 +-
 mne/viz/tests/test_utils.py                        |   73 +-
 mne/viz/topo.py                                    |  481 +-
 mne/viz/topomap.py                                 | 1163 +++-
 mne/viz/utils.py                                   |  700 ++-
 setup.cfg                                          |    6 +-
 setup.py                                           |   36 +-
 tutorials/README.txt                               |    4 +
 .../plot_cluster_1samp_test_time_frequency.py      |   18 +-
 .../plot_cluster_methods_tutorial.py               |   15 +-
 .../plot_cluster_stats_spatio_temporal.py          |   15 +-
 .../plot_cluster_stats_spatio_temporal_2samp.py    |   14 +-
 ...tats_spatio_temporal_repeated_measures_anova.py |   47 +-
 .../plot_cluster_stats_time_frequency.py           |   33 +-
 ...stats_time_frequency_repeated_measures_anova.py |   43 +-
 tutorials/plot_creating_data_structures.py         |  180 +
 tutorials/plot_epochs_objects.py                   |  103 +
 .../plot_epochs_to_data_frame.py                   |    7 +-
 .../plot_ica_from_raw.py                           |   25 +-
 tutorials/plot_info.py                             |   90 +
 tutorials/plot_introduction.py                     |  373 ++
 tutorials/plot_modifying_data_inplace.py           |   74 +
 tutorials/plot_raw_objects.py                      |  133 +
 tutorials/plot_source_localization_basics.py       |   98 +
 .../plot_spatio_temporal_cluster_stats_sensor.py   |  193 +
 717 files changed, 70206 insertions(+), 32541 deletions(-)
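
Most of the churn above comes from two tree-level renames: doc/source/* moved up to doc/, and the old mne/layouts and mne/fiff packages were folded into mne/channels and mne/io. As a minimal sketch of what the move means for downstream imports (the function name is inferred from the relocated layout.py in the diffstat, so treat it as an assumption rather than a verified API of this exact snapshot):

    # Layout files now live under mne/channels/data/layouts (see diffstat);
    # the old mne.layouts import path is gone as of 0.10.
    from mne.channels import read_layout  # was: from mne.layouts import read_layout

    layout = read_layout('Vectorview-all')  # one of the .lout files listed above
    print(layout.names[:5])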

diff --git a/.gitignore b/.gitignore
old mode 100755
new mode 100644
index a33eecd..8b91f82
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 *.pyc
 *.pyo
+*.sh
 *.so
 *.fif
 *.tar.gz
@@ -28,24 +29,27 @@ memmap*.dat
 tmp-*.w
 tmtags
 auto_examples
-MNE-sample-data*
-MNE-spm-face*
 MNE-eegbci-data*
+MNE-sample-data*
 MNE-somato-data*
+MNE-spm-face*
+MNE-testing-data*
+MNE-brainstorm-data*
 MEGSIM*
 build
 coverage
 
 dist/
 doc/_build/
-doc/build/
+doc/generated/
 doc/auto_examples/
+doc/auto_tutorials/
 doc/modules/generated/
-doc/source/generated/
 pip-log.txt
 .coverage
 tags
 doc/coverages
 doc/samples
+cover
 
 *.orig
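
The new MNE-testing-data* and MNE-brainstorm-data* patterns match the dataset fetchers introduced in this release (see mne/datasets/testing and mne/datasets/brainstorm in the diffstat). A quick sketch of triggering the download these patterns are meant to ignore, using the same call the new .travis.yml and Makefile below run:

    # Fetch (or just locate, if already cached) the testing dataset used by
    # the test suite; prints the directory containing MNE-testing-data.
    import mne
    print(mne.datasets.testing.data_path(verbose=True))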
diff --git a/.mailmap b/.mailmap
index fbf64aa..90afd41 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,23 +27,36 @@ Dan G. Wakeman <dgwakeman at gmail.com>
 Teon Brooks <teon.brooks at gmail.com>
 Teon Brooks <teon.brooks at gmail.com> Teon <teon at nyu.edu>
 Teon Brooks <teon.brooks at gmail.com> Teon Brooks <teon at nyu.edu>
+Teon Brooks <teon.brooks at gmail.com> <teon at nyu.edu>
 Romain Trachel <romain.trachel at inria.fr>
 Roman Goj <roman.goj at gmail.com>
 Andrew Dykstra <andrew.r.dykstra at gmail.com>
 Yousra BEKHTI <yousra.bekhti at gmail.com> Yoursa BEKHTI <ybekhti at is222485.intra.cea.fr>
 Yousra BEKHTI <yousra.bekhti at gmail.com> Yoursa BEKHTI <yousra.bekhti at gmail.com>
-Mainak Jas <mainakjas at gmail.com> Mainak <mainakjas at gmail.com>
 Alan Leggitt <leggitta3 at gmail.com> leggitta <leggitta3 at gmail.com>
 Praveen Sripad <pravsripad at gmail.com> prav <pravsripad at gmail.com>
 Praveen Sripad <pravsripad at gmail.com> prav <prav at prav-dell.(none)>
 Martin Billinger <martin.billinger at tugraz.at> kazemakase <kazemakase at users.noreply.github.com>
 Martin Billinger <martin.billinger at tugraz.at> Martin Billinger <flkazemakase at gmail.com>
 Martin Billinger <martin.billinger at tugraz.at> Martin <martin.billinger at tugraz.at>
-Mainak Jas <mainakjas at gmail.com> Mainak Jas <mainak at neuro.hut.fi>
+Mainak Jas <mainakjas at gmail.com> Mainak <mainakjas at gmail.com>
+Mainak Jas <mainakjas at gmail.com> mainakjas <mainakjas at users.noreply.github.com>
+Mainak Jas <mainakjas at gmail.com> Mainak Jas <mainak at neuro.hut.fi> <mainakjas at users.noreply.github.com>
 Dan G. Wakeman <dgwakeman at gmail.com> Daniel Wakeman <dwakeman at marcie.nmr.mgh.harvard.edu>
 Marmaduke Woodman <mmwoodman at gmail.com> maedoc <maedoc at mm.st>
 Brad Buran <bburan at galenea.com> Brad Buran <bburan at alum.mit.edu>
 Cathy Nangini <cnangini at gmail.com> CN <cnangini at gmail.com>
 Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk> Olaf Hauk <olaf at mac0086.local>
 Jean-Remi King <jeanremi.kibng+github at gmail.com> kingjr <jeanremi.kibng+github at gmail.com>
+Jean-Remi King <jeanremi.kibng+github at gmail.com> UMR9752 <jeanremi.king+github at gmail.com>
+Jean-Remi King <jeanremi.kibng+github at gmail.com> UMR9752 <umr9752 at umr9752-desktop.(none)>
+Jean-Remi King <jeanremi.kibng+github at gmail.com> kingjr <jeanremi.king+github at gmail.com>
 Roan LaPlante <aestrivex at gmail.com> aestrivex <aestrivex at gmail.com>
+Mark Wronkiewicz <wronk.mark at gmail.com> wronk <wronk.mark at gmail.com>
+Basile Pinsard <basile.pinsard at umontreal.ca>
+Clément Moutard <clement.moutard at gmail.com>
+Manoj Kumar <manojkumarsivaraj334 at gmail.com> MechCoder <manojkumarsivaraj334 at gmail.com>
+Ingoo Lee <dlsrnsladlek at naver.com> dlsrnsi <dlsrnsladlek at naver.com>
+Jona Sassenhagen <jona.sassenhagen at staff.uni-marburg.de>
+Jona Sassenhagen <jona.sassenhagen at gmail.com> jona-sassenhagen <jona.sassenhagen at gmail.com>
+Yousra Bekhti <yousra.bekhti at gmail.com> Yousra BEKHTI <yousra.bekhti at gmail.com>
diff --git a/.travis.yml b/.travis.yml
index 0d16828..ffaf453 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,71 +1,145 @@
 language: python
 
+# Use container-based infrastructure
+sudo: false
+
 env:
     # Enable python 2 and python 3 builds
     # DEPS=full: build optional dependencies: pandas, nitime, statsmodels,
-    #            scikit-learn, patsy, nibabel; in the case of Python 2, also
-    #            nitime
+    #            scikit-learn, patsy, nibabel pillow;
+    #            in the case of Python 2, also mayavi, traits, pysurfer
     # DEPS=minimal: don't build optional dependencies; tests that require those
     #               dependencies are supposed to be skipped
-    - PYTHON=2.7 DEPS=full
-    - PYTHON=3.3 DEPS=full
-    - PYTHON=2.6 DEPS=full
-    - PYTHON=2.7 DEPS=minimal
+    #
+    # Note that we don't run coverage on Py3k anyway because it slows our tests
+    # by a factor of 2 (!), so we make this our "from install dir" run.
+    #
+    # If we change the old-version run to be a different Python version
+    # from 2.6, then we need to update mne.utils.clean_warning_registry.
+    #
+    # Run one test (3.4) with a non-default stim channel to make sure our
+    # tests are explicit about channels.
+    #
+    # Must force libpng version to avoid silly libpng.so.15 error (MPL 1.1 needs it)
+    #
+    # Conda currently has a packaging bug with mayavi/traits/numpy where 1.10 can't be used
+    # but breaks scipy; hopefully the NUMPY=1.9 pin on the 2.7 full run can eventually be removed
+    - PYTHON=2.7 DEPS=full TEST_LOCATION=src NUMPY="=1.9" SCIPY="=0.16"
+    - PYTHON=2.7 DEPS=nodata TEST_LOCATION=src MNE_DONTWRITE_HOME=true  # also runs flake8
+    - PYTHON=3.4 DEPS=full TEST_LOCATION=install MNE_STIM_CHANNEL=STI101
+    - PYTHON=2.6 DEPS=full TEST_LOCATION=src NUMPY="=1.7" SCIPY="=0.11" MPL="=1.1" LIBPNG="=1.5" SKLEARN="=0.11" PANDAS="=0.8"
+    - PYTHON=2.7 DEPS=minimal TEST_LOCATION=src
+
 # Setup anaconda
 before_install:
-  - wget http://repo.continuum.io/miniconda/Miniconda-2.2.2-Linux-x86_64.sh -O miniconda.sh
+  - wget -q http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
   - chmod +x miniconda.sh
   - ./miniconda.sh -b
-  - export PATH=/home/travis/anaconda/bin:$PATH
-  - conda update --yes conda
-  # The next couple lines fix a crash with multiprocessing on Travis and are not specific to using Miniconda
-  - sudo rm -rf /dev/shm
-  - sudo ln -s /run/shm /dev/shm
+  - export PATH=/home/travis/miniconda/bin:$PATH
+  - conda update --yes --quiet conda
+  # We need to create a (fake) display on Travis (allows Mayavi tests to run)
+  - export DISPLAY=:99.0
+  - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset
 
 install:
     - conda create -n testenv --yes pip python=$PYTHON
     - source activate testenv
-    - conda install --yes ipython==1.1.0 numpy scipy nose matplotlib
+    - ENSURE_PACKAGES="numpy$NUMPY scipy$SCIPY matplotlib$MPL libpng$LIBPNG"
+    - conda install --yes --quiet $ENSURE_PACKAGES nose coverage
+    # We have to replicate e.g. numpy$NUMPY to ensure the recommended (higher) versions
+    # are not automatically installed below with multiple "conda install" calls!
     - if [ "${DEPS}" == "full" ]; then
-        conda install --yes pandas statsmodels scikit-learn patsy pytables;
-        pip install nibabel;
-        if [ ${PYTHON:0:1} == "2" ]; then
-          pip install nitime;
+        curl http://lester.ilabs.uw.edu/files/minimal_cmds.tar.gz | tar xz;
+        export MNE_ROOT="${PWD}/minimal_cmds";
+        export NEUROMAG2FT_ROOT="${PWD}/minimal_cmds/bin";
+        source ${MNE_ROOT}/bin/mne_setup_sh;
+        conda install --yes --quiet $ENSURE_PACKAGES pandas$PANDAS scikit-learn$SKLEARN patsy h5py pillow;
+        pip install -q joblib nibabel;
+        if [ "${PYTHON}" == "3.4" ]; then
+          conda install --yes --quiet $ENSURE_PACKAGES ipython;
+        else
+          conda install --yes --quiet $ENSURE_PACKAGES ipython==1.1.0 statsmodels pandas$PANDAS;
+          pip install -q nitime;
+          if [ "${PYTHON}" == "2.7" ]; then
+            conda install --yes --quiet $ENSURE_PACKAGES mayavi traits;
+            pip install -q pysurfer faulthandler;
+          fi;
         fi;
       fi;
-    - pip install coverage; pip install coveralls; pip install nose-timer
-    - MNE_FORCE_SERIAL=1
-    - MNE_SKIP_SAMPLE_DATASET_TESTS=1
-    # Skip tests that require large downloads over the network to save bandwidth
-    # usage as travis workers are stateless and therefore traditional local
-    # disk caching does not work.
-    - export MNE_SKIP_NETWORK_TESTS=1
+    - if [ "${DEPS}" == "nodata" ]; then
+        pip install -q flake8;
+      fi;
+    - pip install -q coveralls nose-timer
+    # check our versions for the major packages
+    - NP_VERSION=`python -c 'import numpy; print(numpy.__version__)'`
+    - if [ -n "$NUMPY" ] && [ "${NUMPY:(-3)}" != "${NP_VERSION::3}" ]; then
+        echo "Incorrect numpy version $NP_VERSION";
+        exit 1;
+      fi;
+    - SP_VERSION=`python -c 'import scipy; print(scipy.__version__)'`
+    - if [ -n "$SCIPY" ] && [ "${SCIPY:(-4)}" != "${SP_VERSION::4}" ]; then
+        echo "Incorrect scipy version $SP_VERSION";
+        exit 1;
+      fi;
+    - MPL_VERSION=`python -c 'import matplotlib; print(matplotlib.__version__)'`
+    - if [ -n "$MPL" ] && [ "${MPL:(-3)}" != "${MPL_VERSION::3}" ]; then
+        echo "Incorrect matplotlib version $MPL_VERSION";
+        exit 1;
+      fi;
+    # Suppress the parallel outputs for logging cleanliness
+    - export MNE_LOGGING_LEVEL=warning
     - python setup.py build
     - python setup.py install
     - myscripts='browse_raw bti2fiff surf2bem'
-    - for script in $myscripts; do mne $script --help >/dev/null; done;
+    - for script in $myscripts; do mne $script --help; done;
     - SRC_DIR=$(pwd)
     - cd ~
+    # Trigger download of testing data. Note that
+    # the testing dataset has been constructed to contain the necessary
+    # files to act as a FREESURFER_HOME for the coreg tests
+    - if [ "${DEPS}" != "nodata" ]; then
+        python -c 'import mne; mne.datasets.testing.data_path(verbose=True)';
+        if [ "${DEPS}" == "full" ]; then
+          export FREESURFER_HOME=$(python -c 'import mne; print(mne.datasets.testing.data_path())');
+        fi;
+      else
+        export MNE_SKIP_TESTING_DATASET_TESTS=true;
+      fi;
     - MNE_DIR=$(python -c 'import mne;print(mne.__path__[0])')
-    - ln -s ${SRC_DIR}/mne/io/tests/data ${MNE_DIR}/io/tests/data
-    - ln -s ${SRC_DIR}/mne/io/bti/tests/data ${MNE_DIR}/io/bti/tests/data
-    - ln -s ${SRC_DIR}/mne/io/edf/tests/data ${MNE_DIR}/io/edf/tests/data
-    - ln -s ${SRC_DIR}/mne/io/kit/tests/data ${MNE_DIR}/io/kit/tests/data
-    - ln -s ${SRC_DIR}/mne/io/brainvision/tests/data ${MNE_DIR}/io/brainvision/tests/data
-    - ln -s ${SRC_DIR}/mne/io/egi/tests/data ${MNE_DIR}/io/egi/tests/data
-    - ln -s ${SRC_DIR}/setup.cfg ${MNE_DIR}/../setup.cfg
-    - ln -s ${SRC_DIR}/.coveragerc ${MNE_DIR}/../.coveragerc
-    # Link coverage to src dir, coveralls should be run from there (needs git calls)
-    - ln -s ${MNE_DIR}/../.coverage ${SRC_DIR}/.coverage
+    # We run two versions: one out of the source directory (that makes
+    # coveralls coverage work), and one out of the install directory (that
+    # ensures we have included all necessary files).
+    - if [ "${TEST_LOCATION}" == "install" ]; then
+        ln -s ${SRC_DIR}/mne/io/tests/data ${MNE_DIR}/io/tests/data;
+        ln -s ${SRC_DIR}/mne/io/bti/tests/data ${MNE_DIR}/io/bti/tests/data;
+        ln -s ${SRC_DIR}/mne/io/edf/tests/data ${MNE_DIR}/io/edf/tests/data;
+        ln -s ${SRC_DIR}/mne/io/kit/tests/data ${MNE_DIR}/io/kit/tests/data;
+        ln -s ${SRC_DIR}/mne/io/brainvision/tests/data ${MNE_DIR}/io/brainvision/tests/data;
+        ln -s ${SRC_DIR}/mne/io/egi/tests/data ${MNE_DIR}/io/egi/tests/data;
+        ln -s ${SRC_DIR}/mne/preprocessing/tests/data ${MNE_DIR}/preprocessing/tests/data;
+        ln -s ${SRC_DIR}/setup.cfg ${MNE_DIR}/../setup.cfg;
+        ln -s ${SRC_DIR}/.coveragerc ${MNE_DIR}/../.coveragerc;
+        cd ${MNE_DIR}/../;
+      else
+        cd ${SRC_DIR};
+      fi;
+    - if [ "${PYTHON}" != "3.4" ]; then
+        COVERAGE=--with-coverage;
+      else
+        COVERAGE=;
+      fi;
 
 script:
-    # Suppress the parallel outputs for logging cleanliness
-    - export MNE_LOGGING_LEVEL=warning
-    - cd ${MNE_DIR}/../
-    - nosetests --with-timer --timer-top-n 30;
+    - nosetests -a '!ultra_slow_test' --with-timer --timer-top-n 30 --verbosity=2 $COVERAGE
+    - if [ "${DEPS}" == "nodata" ]; then
+        make flake;
+      fi;
 
 after_success:
    # Need to run from source dir to execute "git" commands
-    - echo "Running coveralls";
-    - cd ${SRC_DIR};
-    - coveralls;
+    # Coverage not collected for 3.4, so don't report it
+    - if [ "${TEST_LOCATION}" == "src" ] && [ "${PYTHON}" != "3.4" ]; then
+        echo "Running coveralls";
+        cd ${SRC_DIR};
+        coveralls;
+      fi;
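
The bash version checks above use substring expansion: ${NUMPY:(-3)} takes the
last three characters of the requested version spec, and ${NP_VERSION::3} the
first three characters of the installed version. A rough Python equivalent of
one such check (the variable values here are made up for illustration):

    requested = "1.8"      # e.g. the tail of the NUMPY environment variable
    installed = "1.8.2"    # numpy.__version__ at runtime
    if requested and requested[-3:] != installed[:3]:
        raise SystemExit("Incorrect numpy version %s" % installed)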
diff --git a/MANIFEST.in b/MANIFEST.in
index 6e868f4..761a7a9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,17 +7,22 @@ recursive-include mne *.py
 recursive-include mne/data *.dat
 recursive-include mne/data *.sel
 recursive-include mne/data *.fif.gz
-recursive-include mne/layouts *.lout
-recursive-include mne/layouts *.lay
+recursive-include mne/channels/data/montages *.elc
+recursive-include mne/channels/data/montages *.txt
+recursive-include mne/channels/data/montages *.csd
+recursive-include mne/channels/data/montages *.sfp
+recursive-include mne/channels/data/layouts *.lout
+recursive-include mne/channels/data/layouts *.lay
+recursive-include mne/channels/data/neighbors *.mat
+recursive-include mne/preprocessing/tests/data *.mat
 recursive-include mne/html *.js
 recursive-include mne/html *.css
 recursive-exclude examples/MNE-sample-data *
 recursive-exclude examples/MNE-testing-data *
 recursive-exclude examples/MNE-spm-face *
 recursive-exclude examples/MNE-somato-data *
-# recursive-include mne/fiff/tests/data *
-recursive-exclude mne/fiff/tests/data *
-recursive-exclude mne/fiff/bti/tests/data *
-recursive-exclude mne/fiff/kit/tests/data *
-recursive-exclude mne/fiff/edf/tests/data *
-recursive-exclude mne/fiff/brainvision/tests/data *
+# recursive-exclude mne/io/tests/data *
+# recursive-exclude mne/io/bti/tests/data *
+# recursive-exclude mne/io/kit/tests/data *
+# recursive-exclude mne/io/edf/tests/data *
+# recursive-exclude mne/io/brainvision/tests/data *
diff --git a/Makefile b/Makefile
index 2fd3513..c766d51 100755
--- a/Makefile
+++ b/Makefile
@@ -16,7 +16,7 @@ clean-so:
 	find . -name "*.pyd" | xargs rm -f
 
 clean-build:
-	rm -rf build
+	rm -rf _build
 
 clean-ctags:
 	rm -f tags
@@ -30,41 +30,43 @@ in: inplace # just a shortcut
 inplace:
 	$(PYTHON) setup.py build_ext -i
 
-sample_data: $(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif
-	@echo "Target needs sample data"
+sample_data:
+	@python -c "import mne; mne.datasets.sample.data_path(verbose=True);"
 
-$(CURDIR)/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif:
-	wget -c ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
-	tar xvzf MNE-sample-data-processed.tar.gz
-	mv MNE-sample-data examples/
-	ln -sf ${PWD}/examples/MNE-sample-data ${PWD}/MNE-sample-data
+testing_data:
+	@python -c "import mne; mne.datasets.testing.data_path(verbose=True);"
 
-test: in sample_data
+test: in
 	rm -f .coverage
-	$(NOSETESTS) mne
+	$(NOSETESTS) -a '!ultra_slow_test' mne
 
-test-no-sample: in
-	@MNE_SKIP_SAMPLE_DATASET_TESTS=true \
+test-full: in
+	rm -f .coverage
 	$(NOSETESTS) mne
 
+test-no-network: in
+	sudo unshare -n -- sh -c 'MNE_SKIP_NETWORK_TESTS=1 nosetests mne'
+
+test-no-testing-data: in
+	@MNE_SKIP_TESTING_DATASET_TESTS=true \
+	$(NOSETESTS) mne
 
-test-no-sample-with-coverage: in
+test-no-sample-with-coverage: in testing_data
 	rm -rf coverage .coverage
-	@MNE_SKIP_SAMPLE_DATASET_TESTS=true \
 	$(NOSETESTS) --with-coverage --cover-package=mne --cover-html --cover-html-dir=coverage
 
-test-doc: sample_data
-	$(NOSETESTS) --with-doctest --doctest-tests --doctest-extension=rst doc/ doc/source/
+test-doc: sample_data testing_data
+	$(NOSETESTS) --with-doctest --doctest-tests --doctest-extension=rst doc/
 
-test-coverage: sample_data
+test-coverage: testing_data
 	rm -rf coverage .coverage
 	$(NOSETESTS) --with-coverage --cover-package=mne --cover-html --cover-html-dir=coverage
 
-test-profile: sample_data
+test-profile: testing_data
 	$(NOSETESTS) --with-profile --profile-stats-file stats.pf mne
 	hotshot2dot stats.pf | dot -Tpng -o profile.png
 
-test-mem: in sample_data
+test-mem: in testing_data
 	ulimit -v 1097152 && $(NOSETESTS)
 
 trailing-spaces:
@@ -78,16 +80,34 @@ ctags:
 upload-pipy:
 	python setup.py sdist bdist_egg register upload
 
+flake:
+	@if command -v flake8 > /dev/null; then \
+		echo "Running flake8"; \
+		flake8 --count mne examples; \
+	else \
+		echo "flake8 not found, please install it!"; \
+		exit 1; \
+	fi;
+	@echo "flake8 passed"
+
 codespell:
 	# The *.fif had to be there twice to be properly ignored (!)
 	codespell.py -w -i 3 -S="*.fif,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.coverage,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii" ./dictionary.txt -r .
 
 manpages:
 	@echo "I: generating manpages"
-	set -e; mkdir -p build/manpages && \
+	set -e; mkdir -p _build/manpages && \
 	cd bin && for f in mne*; do \
 			descr=$$(grep -h -e "^ *'''" -e 'DESCRIP =' $$f -h | sed -e "s,.*' *\([^'][^']*\)'.*,\1,g" | head -n 1); \
 	PYTHONPATH=../ \
 			help2man -n "$$descr" --no-discard-stderr --no-info --version-string "$(uver)" ./$$f \
-			>| ../build/manpages/$$f.1; \
+			>| ../_build/manpages/$$f.1; \
 	done
+
+build-doc-dev:
+	cd doc; make clean
+	cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html_dev
+
+build-doc-stable:
+	cd doc; make clean
+	cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html_stable
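
The sample_data and testing_data targets above simply call into the datasets
API, so the same download can be triggered from an interpreter (a sketch;
assumes network access and enough disk space for the sample dataset):

    import mne

    # Downloads on the first call, then returns the cached location.
    path = mne.datasets.sample.data_path(verbose=True)
    print(path)  # e.g. .../MNE-sample-data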
diff --git a/README.rst b/README.rst
index bc77ba5..be2df09 100644
--- a/README.rst
+++ b/README.rst
@@ -1,11 +1,19 @@
 .. -*- mode: rst -*-
 
 
-|Travis|_
+|Travis|_ |Appveyor|_ |Coveralls|_ |Zenodo|_
 
 .. |Travis| image:: https://api.travis-ci.org/mne-tools/mne-python.png?branch=master
 .. _Travis: https://travis-ci.org/mne-tools/mne-python
 
+.. |Appveyor| image:: https://ci.appveyor.com/api/projects/status/reccwk3filrasumg/branch/master?svg=true
+.. _Appveyor: https://ci.appveyor.com/project/Eric89GXL/mne-python/branch/master
+
+.. |Coveralls| image:: https://coveralls.io/repos/mne-tools/mne-python/badge.png?branch=master
+.. _Coveralls: https://coveralls.io/r/mne-tools/mne-python?branch=master
+
+.. |Zenodo| image:: https://zenodo.org/badge/5822/mne-tools/mne-python.svg
+.. _Zenodo: http://dx.doi.org/10.5281/zenodo.17856
 
 `mne-python <http://martinos.org/mne/mne-python.html>`_
 =======================================================
@@ -40,7 +48,7 @@ To get the latest code using git, simply type::
     git clone git://github.com/mne-tools/mne-python.git
 
 If you don't have git installed, you can download a zip or tarball
-of the latest code: http://github.com/mne-tools/mne-python/archives/master
+of the latest code: https://github.com/mne-tools/mne-python/archives/master
 
 Install mne-python
 ^^^^^^^^^^^^^^^^^^
@@ -73,7 +81,7 @@ Dependencies
 The required dependencies to build the software are python >= 2.6,
 NumPy >= 1.6, SciPy >= 0.7.2 and matplotlib >= 0.98.4.
 
-Some isolated functions require pandas >= 0.7.3 and nitime (multitaper analysis).
+Some isolated functions require pandas >= 0.7.3.
 
 To run the tests you will also need nose >= 0.10
 and the MNE sample dataset (will be downloaded automatically
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..82e70d4
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,36 @@
+# CI on Windows via appveyor
+# This file was based on Olivier Grisel's python-appveyor-demo
+
+environment:
+
+  matrix:
+    - PYTHON: "C:\\Python27-conda64"
+      PYTHON_VERSION: "2.7"
+      PYTHON_ARCH: "64"
+
+install:
+  # Install miniconda Python
+  - "powershell ./make/install_python.ps1"
+
+  # Prepend newly installed Python to the PATH of this build (this cannot be
+  # done from inside the powershell script as it would require restarting
+  # the parent CMD process).
+  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+
+  # Check that we have the expected version and architecture for Python
+  - "python --version"
+  - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
+
+  # Install the dependencies of the project (skip nibabel for speed)
+  - "conda install --yes --quiet setuptools numpy scipy matplotlib scikit-learn nose mayavi pandas h5py PIL patsy"
+  - "pip install nose-timer nibabel nitime"
+  - "python setup.py develop"
+  - "SET MNE_SKIP_NETWORK_TESTS=1"
+  - "SET MNE_LOGGING_LEVEL=warning"
+  - "python -c \"import mne; mne.datasets.testing.data_path()\""
+
+build: false  # Not a C# project, build stuff at the test step instead.
+
+test_script:
+  # Run the project tests, but (sadly) exclude ones that take a long time
+  - "nosetests --verbosity=2 -a !slow_test --with-timer --timer-top-n=20 --timer-ok 5 --timer-warning 15"
diff --git a/bin/mne b/bin/mne
index a8169fe..4744cfb 100755
--- a/bin/mne
+++ b/bin/mne
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 import sys
 import glob
diff --git a/doc/Makefile b/doc/Makefile
index 0911f13..7053c66 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -9,101 +9,101 @@ PAPER         =
 # Internal variables.
 PAPEROPT_a4     = -D latex_paper_size=a4
 PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+ALLSPHINXOPTS   = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
 
 .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
 
 help:
 	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html      to make standalone HTML files (stable version)"
-	@echo "  dev_html  to make standalone HTML files (dev version)"
-	@echo "  dirhtml   to make HTML files named index.html in directories"
-	@echo "  pickle    to make pickle files"
-	@echo "  json      to make JSON files"
-	@echo "  htmlhelp  to make HTML files and a HTML help project"
-	@echo "  qthelp    to make HTML files and a qthelp project"
-	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  changes   to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck to check all external links for integrity"
-	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+	@echo "  html_stable  to make standalone HTML files (stable version)"
+	@echo "  html_dev     to make standalone HTML files (dev version)"
+	@echo "  *-noplot     to make standalone HTML files without plotting"
+	@echo "  dirhtml      to make HTML files named index.html in directories"
+	@echo "  pickle       to make pickle files"
+	@echo "  json         to make JSON files"
+	@echo "  htmlhelp     to make HTML files and a HTML help project"
+	@echo "  qthelp       to make HTML files and a qthelp project"
+	@echo "  latex        to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes      to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck    to check all external links for integrity"
+	@echo "  doctest      to run all doctests embedded in the documentation (if enabled)"
 
 clean:
-	-rm -rf build/*
-	-rm -rf source/auto_examples
-	-rm -rf source/generated
+	-rm -rf _build/*
+	-rm -rf auto_examples
+	-rm -rf auto_tutorials
+	-rm -rf generated
 	-rm -rf *.stc
 	-rm -rf *.fif
 	-rm -rf *.nii.gz
 
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+html_stable:
+	$(SPHINXBUILD) -D raise_gallery=1 -b html $(ALLSPHINXOPTS) _build/html_stable
 	@echo
-	@echo "Build finished. The HTML pages are in build/html."
+	@echo "Build finished. The HTML pages are in _build/html_stable."
 
 html_dev:
-	BUILD_DEV_HTML=1 $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	BUILD_DEV_HTML=1 $(SPHINXBUILD) -D raise_gallery=1 -b html $(ALLSPHINXOPTS) _build/html
 	@echo
-	@echo "Build finished. The HTML pages are in build/html."
+	@echo "Build finished. The HTML pages are in _build/html"
 
-html-noplot:
-	$(SPHINXBUILD) -D plot_gallery=False -b html $(ALLSPHINXOPTS) build/html
-	@echo
-	@echo "Build finished. The HTML pages are in build/html/stable."
+html_dev-noslow:
+	BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=fast -b html $(ALLSPHINXOPTS) _build/html
 
 html_dev-noplot:
-	BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=False -b html $(ALLSPHINXOPTS) build/html
+	BUILD_DEV_HTML=1 $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) _build/html
 	@echo
-	@echo "Build finished. The HTML pages are in build/html/stable."
+	@echo "Build finished. The HTML pages are in _build/html."
 
 dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) build/dirhtml
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml
 	@echo
-	@echo "Build finished. The HTML pages are in build/dirhtml."
+	@echo "Build finished. The HTML pages are in _build/dirhtml."
 
 pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle
 	@echo
 	@echo "Build finished; now you can process the pickle files."
 
 json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json
 	@echo
 	@echo "Build finished; now you can process the JSON files."
 
 htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp
 	@echo
 	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in build/htmlhelp."
+	      ".hhp project file in _build/htmlhelp."
 
 qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp
 	@echo
 	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in build/qthelp, like this:"
-	@echo "# qcollectiongenerator build/qthelp/MNE.qhcp"
+	      ".qhcp project file in _build/qthelp, like this:"
+	@echo "# qcollectiongenerator _build/qthelp/MNE.qhcp"
 	@echo "To view the help file:"
-	@echo "# assistant -collectionFile build/qthelp/MNE.qhc"
+	@echo "# assistant -collectionFile _build/qthelp/MNE.qhc"
 
 latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
 	@echo
-	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Build finished; the LaTeX files are in _build/latex."
 	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
 	      "run these through (pdf)latex."
 
 changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
 	@echo
-	@echo "The overview file is in build/changes."
+	@echo "The overview file is in _build/changes."
 
 linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck
 	@echo
 	@echo "Link check complete; look for any errors in the above output " \
-	      "or in build/linkcheck/output.txt."
+	      "or in _build/linkcheck/output.txt."
 
 doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) build/doctest
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest
 	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in build/doctest/output.txt."
+	      "results in _build/doctest/output.txt."
diff --git a/doc/source/_static/branch_dropdown.png b/doc/_static/branch_dropdown.png
similarity index 100%
rename from doc/source/_static/branch_dropdown.png
rename to doc/_static/branch_dropdown.png
diff --git a/doc/source/_static/favicon.ico b/doc/_static/favicon.ico
similarity index 100%
rename from doc/source/_static/favicon.ico
rename to doc/_static/favicon.ico
diff --git a/doc/_static/flow_diagram.svg b/doc/_static/flow_diagram.svg
new file mode 100644
index 0000000..783f5e4
--- /dev/null
+++ b/doc/_static/flow_diagram.svg
@@ -0,0 +1,204 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+ "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by graphviz version 2.38.0 (20140413.2041)
+ -->
+<!-- Title: %3 Pages: 1 -->
+<svg width="543pt" height="483pt"
+ viewBox="0.00 0.00 543.27 483.09" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(15.15 479.09)">
+<title>%3</title>
+<polygon fill="white" stroke="none" points="-15.15,4 -15.15,-479.09 528.12,-479.09 528.12,4 -15.15,4"/>
+<g id="clust1" class="cluster"><title>cluster0</title>
+<polygon fill="none" stroke="black" points="302.12,-227.4 302.12,-475.09 524.12,-475.09 524.12,-227.4 302.12,-227.4"/>
+<text text-anchor="middle" x="413.12" y="-459.899" font-family="Arial" font-size="14.00">Structural information</text>
+</g>
+<!-- src -->
+<g id="node1" class="node"><title>src</title>
+<polygon fill="#ff6347" stroke="none" points="401.998,-271.4 312.242,-271.4 312.242,-235.4 401.998,-235.4 401.998,-271.4"/>
+<text text-anchor="start" x="320.432" y="-254.8" font-family="Arial" font-size="12.00">Source space</text>
+<text text-anchor="start" x="322.212" y="-246" font-family="Arial" font-style="italic" font-size="8.00">mne.SourceSpaces</text>
+</g>
+<!-- fwd -->
+<g id="node6" class="node"><title>fwd</title>
+<defs>
+<linearGradient id="l_0" gradientUnits="userSpaceOnUse" x1="336.12" y1="-190.2" x2="336.12" y2="-154.2" >
+<stop offset="0" style="stop-color:#ff6347;stop-opacity:1.;"/>
+<stop offset="1" style="stop-color:#7bbeca;stop-opacity:1.;"/>
+</linearGradient>
+</defs>
+<polygon fill="url(#l_0)" stroke="none" points="388.62,-190.2 283.62,-190.2 283.62,-154.2 388.62,-154.2 388.62,-190.2"/>
+<text text-anchor="start" x="291.767" y="-173.6" font-family="Arial" font-size="12.00">Forward solution</text>
+<text text-anchor="start" x="298.108" y="-164.8" font-family="Arial" font-style="italic" font-size="8.00">mne.forward.Forward</text>
+</g>
+<!-- src->fwd -->
+<g id="edge5" class="edge"><title>src->fwd</title>
+<path fill="none" stroke="black" d="M352.57,-235.21C349.84,-224.94 346.32,-211.66 343.24,-200.03"/>
+<polygon fill="black" stroke="black" points="346.619,-199.117 340.67,-190.35 339.853,-200.913 346.619,-199.117"/>
+</g>
+<!-- bem -->
+<g id="node2" class="node"><title>bem</title>
+<polygon fill="#ff6347" stroke="none" points="474.12,-271.4 420.12,-271.4 420.12,-235.4 474.12,-235.4 474.12,-271.4"/>
+<text text-anchor="start" x="434.118" y="-250.699" font-family="Arial" font-size="12.00">BEM</text>
+</g>
+<!-- bem->fwd -->
+<g id="edge6" class="edge"><title>bem->fwd</title>
+<path fill="none" stroke="black" d="M423.04,-235.21C407.04,-223.8 385.84,-208.67 368.39,-196.22"/>
+<polygon fill="black" stroke="black" points="370.334,-193.307 360.16,-190.35 366.269,-199.006 370.334,-193.307"/>
+</g>
+<!-- recon -->
+<g id="node3" class="node"><title>recon</title>
+<polygon fill="#ff6347" stroke="none" points="430.153,-352.6 310.087,-352.6 310.087,-316.6 430.153,-316.6 430.153,-352.6"/>
+<text text-anchor="start" x="318.103" y="-331.899" font-family="Arial" font-size="12.00">Freesurfer surfaces</text>
+</g>
+<!-- recon->src -->
+<g id="edge1" class="edge"><title>recon->src</title>
+<path fill="none" stroke="black" d="M367.26,-316.51C366.31,-310.81 365.25,-304.43 364.29,-298.6 363.4,-293.18 362.46,-287.38 361.55,-281.83"/>
+<polygon fill="black" stroke="black" points="364.949,-280.924 359.9,-271.61 358.039,-282.04 364.949,-280.924"/>
+<text text-anchor="middle" x="402.04" y="-292.2" font-family="Arial" font-size="8.00">setup_source_space</text>
+</g>
+<!-- recon->bem -->
+<g id="edge2" class="edge"><title>recon->bem</title>
+<path fill="none" stroke="black" d="M417.11,-316.55C425.1,-311.81 432.59,-305.9 438.12,-298.6 441.82,-293.72 444.09,-287.7 445.48,-281.69"/>
+<polygon fill="black" stroke="black" points="448.94,-282.215 447.05,-271.79 442.027,-281.118 448.94,-282.215"/>
+</g>
+<!-- T1 -->
+<g id="node4" class="node"><title>T1</title>
+<polygon fill="#ff6347" stroke="none" points="397.12,-443 343.12,-443 343.12,-407 397.12,-407 397.12,-443"/>
+<text text-anchor="start" x="363.118" y="-422.299" font-family="Arial" font-size="12.00">T1</text>
+</g>
+<!-- T1->recon -->
+<g id="edge3" class="edge"><title>T1->recon</title>
+<path fill="none" stroke="black" d="M370.12,-406.95C370.12,-394.56 370.12,-377.51 370.12,-363.19"/>
+<polygon fill="black" stroke="black" points="373.62,-362.89 370.12,-352.89 366.62,-362.89 373.62,-362.89"/>
+</g>
+<!-- flashes -->
+<g id="node5" class="node"><title>flashes</title>
+<polygon fill="#ff6347" stroke="none" points="516.62,-352.6 447.62,-352.6 447.62,-316.6 516.62,-316.6 516.62,-352.6"/>
+<text text-anchor="start" x="455.77" y="-331.899" font-family="Arial" font-size="12.00">Flash5/30</text>
+</g>
+<!-- flashes->bem -->
+<g id="edge4" class="edge"><title>flashes->bem</title>
+<path fill="none" stroke="black" d="M474.53,-316.41C469.94,-306.03 464,-292.59 458.82,-280.88"/>
+<polygon fill="black" stroke="black" points="461.941,-279.284 454.7,-271.55 455.538,-282.112 461.941,-279.284"/>
+</g>
+<!-- inv -->
+<g id="node7" class="node"><title>inv</title>
+<defs>
+<linearGradient id="l_1" gradientUnits="userSpaceOnUse" x1="218.12" y1="-109" x2="218.12" y2="-73" >
+<stop offset="0" style="stop-color:#7bbeca;stop-opacity:1.;"/>
+<stop offset="1" style="stop-color:#ff6347;stop-opacity:1.;"/>
+</linearGradient>
+</defs>
+<polygon fill="url(#l_1)" stroke="none" points="276.984,-109 159.256,-109 159.256,-73 276.984,-73 276.984,-109"/>
+<text text-anchor="start" x="174.43" y="-92.4" font-family="Arial" font-size="12.00">Inverse operator</text>
+<text text-anchor="start" x="167.438" y="-83.6" font-family="Arial" font-style="italic" font-size="8.00">mne.minimum_norm.Inverse</text>
+</g>
+<!-- fwd->inv -->
+<g id="edge7" class="edge"><title>fwd->inv</title>
+<path fill="none" stroke="black" d="M325.92,-153.9C319.92,-144.91 311.64,-134.31 302.12,-127 295.46,-121.88 287.94,-117.38 280.2,-113.46"/>
+<polygon fill="black" stroke="black" points="281.595,-110.248 271.06,-109.12 278.592,-116.571 281.595,-110.248"/>
+</g>
+<!-- stc -->
+<g id="node10" class="node"><title>stc</title>
+<polygon fill="#ff6347" stroke="none" points="143.622,-36 40.622,-36 40.622,-0 143.622,-0 143.622,-36"/>
+<text text-anchor="start" x="48.7685" y="-19.4" font-family="Arial" font-size="12.00">Source estimate</text>
+<text text-anchor="start" x="54.995" y="-10.6" font-family="Arial" font-style="italic" font-size="8.00">mne.SourceEstimate</text>
+</g>
+<!-- inv->stc -->
+<g id="edge14" class="edge"><title>inv->stc</title>
+<path fill="none" stroke="black" d="M187.94,-72.994C171.08,-63.491 149.86,-51.536 131.68,-41.292"/>
+<polygon fill="black" stroke="black" points="132.92,-37.9736 122.49,-36.114 129.484,-44.0722 132.92,-37.9736"/>
+</g>
+<!-- pre -->
+<g id="node8" class="node"><title>pre</title>
+<polygon fill="#7bbeca" stroke="none" points="129.622,-352.6 12.622,-352.6 12.622,-316.6 129.622,-316.6 129.622,-352.6"/>
+<text text-anchor="start" x="20.7577" y="-336" font-family="Arial" font-size="12.00">Preprocessed data</text>
+<text text-anchor="start" x="50.0029" y="-327.2" font-family="Arial" font-style="italic" font-size="8.00">mne.io.Raw</text>
+</g>
+<!-- epo -->
+<g id="node9" class="node"><title>epo</title>
+<polygon fill="#7bbeca" stroke="none" points="116.185,-271.4 26.0595,-271.4 26.0595,-235.4 116.185,-235.4 116.185,-271.4"/>
+<text text-anchor="start" x="34.0907" y="-254.8" font-family="Arial" font-size="12.00">Epoched data</text>
+<text text-anchor="start" x="48.8876" y="-246" font-family="Arial" font-style="italic" font-size="8.00">mne.Epochs</text>
+</g>
+<!-- pre->epo -->
+<g id="edge8" class="edge"><title>pre->epo</title>
+<path fill="none" stroke="black" d="M71.122,-316.41C71.122,-306.24 71.122,-293.13 71.122,-281.58"/>
+<polygon fill="black" stroke="black" points="74.6221,-281.55 71.122,-271.55 67.6221,-281.55 74.6221,-281.55"/>
+<text text-anchor="middle" x="84.464" y="-292.2" font-family="Arial" font-size="8.00">Epochs</text>
+</g>
+<!-- epo->stc -->
+<g id="edge9" class="edge"><title>epo->stc</title>
+<path fill="none" stroke="black" d="M51.145,-235.32C26.304,-211.89 -11.15,-167.99 3.5124,-127 15.444,-93.645 42.606,-63.194 63.734,-43.192"/>
+<polygon fill="black" stroke="black" points="66.2223,-45.6585 71.206,-36.309 61.4796,-40.51 66.2223,-45.6585"/>
+<text text-anchor="middle" x="43.926" y="-129.8" font-family="Arial" font-size="8.00">apply_inverse_epochs</text>
+</g>
+<!-- evo -->
+<g id="node11" class="node"><title>evo</title>
+<polygon fill="#7bbeca" stroke="none" points="132.122,-190.2 38.122,-190.2 38.122,-154.2 132.122,-154.2 132.122,-190.2"/>
+<text text-anchor="start" x="46.2011" y="-173.6" font-family="Arial" font-size="12.00">Averaged data</text>
+<text text-anchor="start" x="62.8876" y="-164.8" font-family="Arial" font-style="italic" font-size="8.00">mne.Evoked</text>
+</g>
+<!-- epo->evo -->
+<g id="edge10" class="edge"><title>epo->evo</title>
+<path fill="none" stroke="black" d="M70.231,-235.05C70.142,-226.84 70.519,-216.94 72.192,-208.2 72.711,-205.49 73.414,-202.71 74.228,-199.97"/>
+<polygon fill="black" stroke="black" points="77.6037,-200.914 77.516,-190.32 70.9778,-198.657 77.6037,-200.914"/>
+<text text-anchor="middle" x="101.59" y="-211" font-family="Arial" font-size="8.00">epochs.average</text>
+</g>
+<!-- cov -->
+<g id="node12" class="node"><title>cov</title>
+<polygon fill="#7bbeca" stroke="none" points="262.165,-190.2 154.075,-190.2 154.075,-154.2 262.165,-154.2 262.165,-190.2"/>
+<text text-anchor="start" x="162.098" y="-173.6" font-family="Arial" font-size="12.00">Noise covariance</text>
+<text text-anchor="start" x="178.995" y="-164.8" font-family="Arial" font-style="italic" font-size="8.00">mne.Covariance</text>
+</g>
+<!-- epo->cov -->
+<g id="edge11" class="edge"><title>epo->cov</title>
+<path fill="none" stroke="black" d="M100.85,-235.21C121.05,-223.54 147.95,-207.99 169.76,-195.38"/>
+<polygon fill="black" stroke="black" points="171.549,-198.389 178.45,-190.35 168.042,-192.33 171.549,-198.389"/>
+<text text-anchor="middle" x="185.03" y="-211" font-family="Arial" font-size="8.00">compute_covariance</text>
+</g>
+<!-- evo->stc -->
+<g id="edge12" class="edge"><title>evo->stc</title>
+<path fill="none" stroke="black" d="M85.911,-154.04C87.114,-127.89 89.416,-77.82 90.855,-46.543"/>
+<polygon fill="black" stroke="black" points="94.3613,-46.4844 91.325,-36.334 87.3687,-46.1624 94.3613,-46.4844"/>
+<text text-anchor="middle" x="114.8" y="-89.1996" font-family="Arial" font-size="8.00">apply_inverse</text>
+</g>
+<!-- cov->inv -->
+<g id="edge13" class="edge"><title>cov->inv</title>
+<path fill="none" stroke="black" d="M210.29,-154.01C211.58,-143.84 213.23,-130.73 214.69,-119.19"/>
+<polygon fill="black" stroke="black" points="218.177,-119.51 215.96,-109.15 211.233,-118.632 218.177,-119.51"/>
+<text text-anchor="middle" x="256.15" y="-129.8" font-family="Arial" font-size="8.00">make_inverse_operator</text>
+</g>
+<!-- raw -->
+<g id="node13" class="node"><title>raw</title>
+<polygon fill="#7bbeca" stroke="none" points="104.622,-443 37.622,-443 37.622,-407 104.622,-407 104.622,-443"/>
+<text text-anchor="start" x="45.7743" y="-426.4" font-family="Arial" font-size="12.00">Raw data</text>
+<text text-anchor="start" x="50.0029" y="-417.6" font-family="Arial" font-style="italic" font-size="8.00">mne.io.Raw</text>
+</g>
+<!-- raw->pre -->
+<g id="edge15" class="edge"><title>raw->pre</title>
+<path fill="none" stroke="black" d="M71.122,-406.95C71.122,-394.56 71.122,-377.51 71.122,-363.19"/>
+<polygon fill="black" stroke="black" points="74.6221,-362.89 71.122,-352.89 67.6221,-362.89 74.6221,-362.89"/>
+<text text-anchor="middle" x="86.014" y="-382.599" font-family="Arial" font-size="8.00">raw.filter</text>
+<text text-anchor="middle" x="86.014" y="-373.4" font-family="Arial" font-size="8.00">etc.</text>
+</g>
+<!-- trans -->
+<g id="node14" class="node"><title>trans</title>
+<defs>
+<linearGradient id="l_2" gradientUnits="userSpaceOnUse" x1="244.12" y1="-271.4" x2="244.12" y2="-235.4" >
+<stop offset="0" style="stop-color:#7bbeca;stop-opacity:1.;"/>
+<stop offset="1" style="stop-color:#ff6347;stop-opacity:1.;"/>
+</linearGradient>
+</defs>
+<polygon fill="url(#l_2)" stroke="none" points="294.62,-271.4 193.62,-271.4 193.62,-235.4 294.62,-235.4 294.62,-271.4"/>
+<text text-anchor="start" x="201.774" y="-250.699" font-family="Arial" font-size="12.00">Head-MRI trans</text>
+</g>
+<!-- trans->fwd -->
+<g id="edge16" class="edge"><title>trans->fwd</title>
+<path fill="none" stroke="black" d="M246.72,-235.2C248.84,-226.25 252.64,-215.65 259.41,-208.2 263.81,-203.37 269,-199.15 274.58,-195.49"/>
+<polygon fill="black" stroke="black" points="276.757,-198.27 283.61,-190.19 273.214,-192.233 276.757,-198.27"/>
+<text text-anchor="middle" x="301.48" y="-211" font-family="Arial" font-size="8.00">make_forward_solution</text>
+</g>
+</g>
+</svg>
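
The diagram encodes the standard MNE workflow from raw data to a source
estimate. A condensed sketch of the same edges in code (the file names, event
id, and time window are placeholder values, and the src/bem files are assumed
to come from the structural pipeline on the right of the diagram):

    import mne
    from mne.minimum_norm import make_inverse_operator, apply_inverse

    raw = mne.io.Raw('sample_raw.fif')                     # Raw data
    raw.filter(1., 40.)                                    # Preprocessed data
    events = mne.find_events(raw)
    epochs = mne.Epochs(raw, events, event_id=1,
                        tmin=-0.2, tmax=0.5)               # Epoched data
    evoked = epochs.average()                              # Averaged data
    cov = mne.compute_covariance(epochs, tmax=0.)          # Noise covariance
    fwd = mne.make_forward_solution(raw.info, trans='sample-trans.fif',
                                    src='sample-oct-6-src.fif',
                                    bem='sample-bem-sol.fif')  # Forward solution
    inv = make_inverse_operator(evoked.info, fwd, cov)     # Inverse operator
    stc = apply_inverse(evoked, inv)                       # Source estimate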
diff --git a/doc/source/_static/forking_button.png b/doc/_static/forking_button.png
similarity index 100%
rename from doc/source/_static/forking_button.png
rename to doc/_static/forking_button.png
diff --git a/doc/_static/institutions.png b/doc/_static/institutions.png
new file mode 100644
index 0000000..931d724
Binary files /dev/null and b/doc/_static/institutions.png differ
diff --git a/doc/source/_images/mne_helmet.png b/doc/_static/mne_helmet.png
similarity index 100%
rename from doc/source/_images/mne_helmet.png
rename to doc/_static/mne_helmet.png
diff --git a/doc/_static/mne_logo.png b/doc/_static/mne_logo.png
new file mode 100644
index 0000000..6f4e842
Binary files /dev/null and b/doc/_static/mne_logo.png differ
diff --git a/doc/_static/mne_logo_small.png b/doc/_static/mne_logo_small.png
new file mode 100644
index 0000000..30652de
Binary files /dev/null and b/doc/_static/mne_logo_small.png differ
diff --git a/doc/source/_static/pull_button.png b/doc/_static/pull_button.png
similarity index 100%
rename from doc/source/_static/pull_button.png
rename to doc/_static/pull_button.png
diff --git a/doc/_static/style.css b/doc/_static/style.css
new file mode 100644
index 0000000..858288b
--- /dev/null
+++ b/doc/_static/style.css
@@ -0,0 +1,46 @@
+body {
+    font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+}
+
+.navbar-version {
+    display: none;
+}
+
+a {
+    color: rgb(255,2,16);
+    text-decoration: none;
+}
+
+a:hover {
+    color: rgb(255,2,16);
+}
+
+blockquote {
+    font-size: 100% !important;
+}
+
+code {
+    color: #49759c !important;
+    background-color: #f3f5f9 !important;
+}
+
+.devbar {
+    background-color: red;
+    color: white;
+    font-weight:bold;
+    text-align: center;
+    padding: 10px;
+    min-width: 910px;
+}
+
+.devbar a {
+    color: #b8bec4;
+}
+
+#navbar a:hover {
+    color: rgb(255,2,16);
+}
+
+.note a {
+    color: rgb(255,236,0);
+}
\ No newline at end of file
diff --git a/doc/source/_templates/class.rst b/doc/_templates/class.rst
similarity index 100%
rename from doc/source/_templates/class.rst
rename to doc/_templates/class.rst
diff --git a/doc/source/_templates/function.rst b/doc/_templates/function.rst
similarity index 100%
rename from doc/source/_templates/function.rst
rename to doc/_templates/function.rst
diff --git a/doc/source/_templates/layout.html b/doc/_templates/layout.html
similarity index 63%
rename from doc/source/_templates/layout.html
rename to doc/_templates/layout.html
index c71a450..290d0a6 100755
--- a/doc/source/_templates/layout.html
+++ b/doc/_templates/layout.html
@@ -1,6 +1,12 @@
 {% extends "!layout.html" %}
 
+{# Custom CSS overrides #}
+{% set bootswatch_css_custom = ['_static/style.css'] %}
+
 {% block extrahead %}
+
+<link href='http://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700' rel='stylesheet' type='text/css'>
+
 {% if use_google_analytics|tobool %}
     <script type="text/javascript">
     var _gaq = _gaq || [];
@@ -34,35 +40,15 @@
 {% endif %}
 {% endblock %}
 
-
 {% block relbar2 %}{% endblock %}
 
-{% block rootrellink %}
-        <li><a href="{{ pathto('index') }}">Home</a> | </li>
-        <li><a href="{{ pathto('manual') }}">Manual</a> | </li>
-        <li><a href="{{ pathto('mne-python') }}">Python</a> | </li>
-        <li><a href="{{ pathto('cite') }}">Cite MNE</a> | </li>
-        <!-- <li><a href="{{ pathto('search') }}">Search</a></li> -->
-{% endblock %}
-
 {% block relbar1 %}
 {% if build_dev_html|tobool %}
-<div style="background-color: red; color: white; font-weight:bold; text-align: center; padding: 10px; min-width: 910px">
+<div class="devbar">
 This documentation is for the development version ({{ release }}) - <a href="http://martinos.org/mne/stable">Stable version</a>
 </div>
 {% endif %}
-<div style="background-color: white; text-align: left; padding: 10px 7px 15px 15px; min-width: 910px">
-<div style="float: left">
-<a href="{{ pathto('index') }}"><img src="{{
-pathto("_static/mne_logo.png", 1) }}" border="0" alt="py4sci"/></a>
-</div>
 
-<div style="float: right">
-<a href="{{ pathto('index') }}"><img src="{{
-pathto("_static/institutions.png", 1) }}" border="0" alt="py4sci"/></a>
-</div>
-<br style="clear:both"/>
-</div>
 {{ super() }}
 {% endblock %}
 
diff --git a/doc/advanced_setup.rst b/doc/advanced_setup.rst
new file mode 100644
index 0000000..24303d5
--- /dev/null
+++ b/doc/advanced_setup.rst
@@ -0,0 +1,130 @@
+.. _detailed_notes:
+
+Advanced installation and setup
+===============================
+
+MNE is written in pure Python, making it easy to set up on
+any machine with Python >= 2.6, NumPy >= 1.6, SciPy >= 0.7.2,
+and matplotlib >= 1.1.0.
+
+Some isolated functions (e.g. filtering with firwin2) require SciPy >= 0.9.
+
+To run all documentation examples, the following additional packages are required:
+
+    * PySurfer (for visualization of source estimates on cortical surfaces)
+
+    * scikit-learn (for supervised and unsupervised machine learning functionality)
+
+    * pandas >= 0.8 (for export to tabular data structures like Excel files)
+
+    * h5py (for reading and writing HDF5-formatted files)
+
+Note: for optimal performance we recommend installing recent versions of
+NumPy (> 1.7), SciPy (> 0.10), and scikit-learn (>= 0.14).
+
+Development Environment
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Note that we explicitly support the following Python setups, since they
+reflect our development environments and are where functionality is best
+tested:
+
+    * Anaconda (Mac, Linux, Windows)
+
+    * Debian / Ubuntu standard system Python + Scipy stack
+
+    * EPD 7.3 (Mac, Linux)
+
+    * Canopy >= 1.0 (Mac, Linux)
+
+CUDA Optimization
+^^^^^^^^^^^^^^^^^
+
+If you want to use NVIDIA CUDA for filtering (which can yield 3-4x speedups),
+you'll need to install the NVIDIA CUDA toolkit on your system, and then both
+pycuda and scikits.cuda; see:
+
+https://developer.nvidia.com/cuda-downloads
+
+http://mathema.tician.de/software/pycuda
+
+http://wiki.tiker.net/PyCuda/Installation/
+
+https://github.com/lebedov/scikits.cuda
+
+To initialize mne-python CUDA support, after installing these dependencies
+and running their associated unit tests (to ensure your installation is
+correct), you can run:
+
+    >>> mne.cuda.init_cuda() # doctest: +SKIP
+
+If you have everything installed correctly, you should see an INFO-level log
+message telling you your CUDA hardware's available memory. To have CUDA
+initialized on startup, you can do:
+
+    >>> mne.utils.set_config('MNE_USE_CUDA', 'true') # doctest: +SKIP
+
+You can test if MNE CUDA support is working by running the associated test::
+
+    nosetests mne/tests/test_filter.py
+
+If all tests pass with none skipped, then mne-python CUDA support works.
+
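+A minimal sketch of using CUDA once it is enabled (assuming ``raw`` is a
+loaded ``mne.io.Raw`` instance; the only change from the CPU path is the
+``n_jobs`` argument):
+
+    >>> raw.filter(1., 40., n_jobs='cuda')  # doctest: +SKIP
+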
+Multi-threading
+^^^^^^^^^^^^^^^
+
+For optimal performance we recommend using numpy / scipy with the
+multi-threaded ATLAS, GotoBLAS2, or Intel MKL. For example, the Enthought
+Canopy and Anaconda distributions ship with tested MKL-compiled
+numpy / scipy versions. Depending on the use case and your system,
+this may speed up operations by a factor greater than 10.
+
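+One quick way to check which BLAS/LAPACK your numpy build links against
+(plain numpy, not an MNE API) is:
+
+    >>> import numpy; numpy.show_config()  # doctest: +SKIP
+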
+pylab
+^^^^^
+
+For the setups listed above we strongly recommend using the Qt
+matplotlib backend for fast and correct rendering::
+
+    ipython --pylab qt
+
+On Linux, for example, Qt is the only matplotlib backend for which 3D
+rendering works correctly. On Mac OS X, certain matplotlib functions might
+not work as expected with other backends.
+
+IPython notebooks
+^^^^^^^^^^^^^^^^^
+
+To take full advantage of mne-python's visualization capabilities in
+combination with IPython notebooks and inline displaying, please explicitly
+add the following magic method invocation to your notebook or configure your
+notebook runtime accordingly::
+
+    %matplotlib inline
+
+If you use another Python setup and you encounter some difficulties please
+report them on the MNE mailing list or on github to get assistance.
+
+
+.. _inside_martinos:
+
+Inside the Martinos Center
+--------------------------
+
+For people within the MGH/MIT/HMS Martinos Center, MNE is available on the network.
+
+In a terminal do::
+
+    setenv PATH /usr/pubsw/packages/python/epd/bin:${PATH}
+
+If you use Bash replace the previous instruction with::
+
+    export PATH=/usr/pubsw/packages/python/epd/bin:${PATH}
+
+Then start the Python interpreter with::
+
+    ipython
+
+Then type::
+
+    >>> import mne
+
+If you get a new prompt with no error messages, you should be good to go.
diff --git a/doc/build_doc b/doc/build_doc
deleted file mode 100755
index 0c75cb2..0000000
--- a/doc/build_doc
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /usr/bin/env python
-
-import os
-import sys
-
-args = sys.argv
-
-if '-h' in args:
-    print "build_doc [--nocfg] [--nosphinx] [--noweb]"
-    sys.exit(0)
-
-if "--nosphinx" not in args:
-    os.system("make html")
-
-# if "--noweb" not in args:
-#     os.system("scp -r build/html/* user at ftp.XXX.edu:pyfiff")
diff --git a/doc/source/cite.rst b/doc/cite.rst
similarity index 67%
rename from doc/source/cite.rst
rename to doc/cite.rst
index 14e323a..57ffc7d 100644
--- a/doc/source/cite.rst
+++ b/doc/cite.rst
@@ -1,14 +1,17 @@
 .. _cite:
 
-Cite MNE and MNE-Python
------------------------
+Cite MNE
+--------
 
 If you use in your research the implementations provided by the MNE software you should cite:
 
-    [1] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, L. Parkkonen, M. Hämäläinen, `MNE software for processing MEG and EEG data <http://www.ncbi.nlm.nih.gov/pubmed/24161808>`_, NeuroImage, Volume 86, 1 February 2014, Pages 446-460, ISSN 1053-8119, `[DOI] <http://dx.doi.org/10.1016/j.neuroimage.2013.10.027>`_
+    [1] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, L. Parkkonen, M. Hämäläinen, `MNE software for processing MEG and EEG data <http://www.ncbi.nlm.nih.gov/pubmed/24161808>`_, NeuroImage, Volume 86, 1 February 2014, Pages 446-460, ISSN 1053-8119, `[DOI] <http://dx.doi.org/10.1016/j.neuroimage.2013.10.027>`__
 
 If you use the Python code you should cite as well:
 
-    [2] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, R. Goj, M. Jas, T. Brooks, L. Parkkonen, M. Hämäläinen, `MEG and EEG data analysis with MNE-Python <http://www.frontiersin.org/Journal/Abstract.aspx?s=1304&name=brain_imaging_methods&ART_DOI=10.3389/fnins.2013.00267>`_, Frontiers in Neuroscience, Volume 7, 2013, ISSN 1662-453X, `[DOI] <http://dx.doi.org/10.3389/fnins.2013.00267>`_
+    [2] A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, R. Goj, M. Jas, T. Brooks, L. Parkkonen, M. Hämäläinen, `MEG and EEG data analysis with MNE-Python <http://journal.frontiersin.org/article/10.3389/fnins.2013.00267/abstract>`_, Frontiers in Neuroscience, Volume 7, 2013, ISSN 1662-453X, `[DOI] <http://dx.doi.org/10.3389/fnins.2013.00267>`__
+
+To cite specific versions of the software, you can use the DOIs provided by
+`Zenodo <https://zenodo.org/search?ln=en&p=mne-python>`_.
 
 You should as well cite the related method papers, some of which are listed in :ref:`ch_reading`.
diff --git a/doc/source/conf.py b/doc/conf.py
similarity index 59%
rename from doc/source/conf.py
rename to doc/conf.py
index 3050a81..5668f85 100644
--- a/doc/source/conf.py
+++ b/doc/conf.py
@@ -3,7 +3,8 @@
 # MNE documentation build configuration file, created by
 # sphinx-quickstart on Fri Jun 11 10:45:48 2010.
 #
-# This file is execfile()d with the current directory set to its containing dir.
+# This file is execfile()d with the current directory set to its containing
+# dir.
 #
 # Note that not all possible configuration values are present in this
 # autogenerated file.
@@ -11,34 +12,40 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys, os
+import sys
+import os
+import os.path as op
+from datetime import date
+
+import sphinxgallery
+import sphinx_bootstrap_theme
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath('../../mne'))
-sys.path.append(os.path.abspath('../sphinxext'))
+curdir = op.dirname(__file__)
+sys.path.append(op.abspath(op.join(curdir, '..', 'mne')))
+sys.path.append(op.abspath(op.join(curdir, 'sphinxext')))
 
 import mne
 
-# -- General configuration -----------------------------------------------------
+# -- General configuration ------------------------------------------------
 
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+import numpy_ext.numpydoc
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.autosummary',
               'sphinx.ext.pngmath',
-              'gen_rst']
+              'sphinx.ext.mathjax',
+              'numpy_ext.numpydoc',
+            #   'sphinx.ext.intersphinx',
+              # 'flow_diagram',
+              'sphinxgallery.gen_gallery']
 
-try:
-    import numpy_ext.numpydoc
-    extensions.append('numpy_ext.numpydoc')
-    # With older versions of sphinx, this causes a crash
-    autosummary_generate = True
-except:
-    # Older version of sphinx
-    extensions.append('numpy_ext_old.numpydoc')
+autosummary_generate = True
 
-autodoc_default_flags=['inherited-members']
+autodoc_default_flags = ['inherited-members']
 
 # extensions = ['sphinx.ext.autodoc',
 #               'sphinx.ext.doctest',
@@ -56,17 +63,14 @@ templates_path = ['_templates']
 source_suffix = '.rst'
 
 # The encoding of source files.
-#source_encoding = 'utf-8'
-
-# Generate the plots for the gallery
-plot_gallery = True
+# source_encoding = 'utf-8'
 
 # The master toctree document.
 master_doc = 'index'
 
 # General information about the project.
 project = u'MNE'
-copyright = u'2012-2013, MNE Developers'
+copyright = u'2012-%s, MNE Developers' % date.today().year
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -79,34 +83,36 @@ release = version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of documents that shouldn't be included in the build.
 unused_docs = ['config_doc.rst']
 
 # List of directories, relative to source directory, that shouldn't be searched
 # for source files.
-exclude_trees = ['build']
+exclude_trees = ['_build']
+exclude_patterns = ['source/generated']
 
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'
@@ -115,31 +121,44 @@ pygments_style = 'sphinx'
 modindex_common_prefix = ['mne.']
 
 
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output --------------------------------------------------
+
 
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'sphinxdoc'
-html_style = 'navy.css'
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'bootstrap'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+html_theme_options = {
+    'navbar_title': ' ',
+    'source_link_position': "footer",
+    'bootswatch_theme': "flatly",
+    'navbar_sidebarrel': False,
+    'bootstrap_version': "3",
+    'navbar_links': [("Tutorials", "tutorials"),
+                     ("Gallery", "auto_examples/index"),
+                     ("Manual", "manual/index"),
+                     ("API", "python_reference"),
+                     ("FAQ", "faq"),
+                     ("Cite", "cite"),
+                     ],
+    }
 
 # Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = ['themes']
+html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+html_logo = "_static/mne_logo_small.png"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
@@ -149,67 +168,81 @@ html_favicon = "favicon.ico"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static', '_images']
+html_static_path = ['_static', '_images', sphinxgallery.glr_path_static()]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-html_sidebars = {'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html',
-                        'sidebar.html']}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-html_use_modindex = False
+# html_domain_indices = True
 
 # If false, no index is generated.
-html_use_index = False
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = False
 
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+html_show_sphinx = False
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
 # variables to pass to HTML templating engine
 build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
 
-html_context = {'use_google_analytics':True, 'use_twitter':True,
-                'use_media_buttons':True, 'build_dev_html':build_dev_html}
+html_context = {'use_google_analytics': True, 'use_twitter': True,
+                'use_media_buttons': True, 'build_dev_html': build_dev_html}
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
+# html_file_suffix = ''
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'mne-doc'
 
 
-# -- Options for LaTeX output --------------------------------------------------
+# -- Options for LaTeX output ------------------------------------------------
 
 # The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
+# latex_paper_size = 'letter'
 
 # The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# latex_font_size = '10pt'
 
 # Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
 latex_documents = [
-  ('index', 'MNE.tex', u'MNE Manual',
-   u'MNE Contributors', 'manual'),
+#    ('index', 'MNE.tex', u'MNE Manual',
+#     u'MNE Contributors', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -221,13 +254,29 @@ latex_logo = "_static/logo.png"
 latex_use_parts = True
 
 # Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
 latex_use_modindex = True
 
-
 trim_doctests_flags = True
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+sphinxgallery_conf = {
+    'examples_dirs'   : ['../examples', '../tutorials'],
+    'gallery_dirs'    : ['auto_examples', 'auto_tutorials'],
+    'doc_module': ('sphinxgallery', 'numpy'),
+    'reference_url': {
+        'mne': None,
+        'matplotlib': 'http://matplotlib.org',
+        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
+        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
+        'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
+    'find_mayavi_figures': True,
+    'default_thumb_file': '_static/mne_helmet.png',
+    }
diff --git a/doc/source/contributing.rst b/doc/contributing.rst
similarity index 76%
rename from doc/source/contributing.rst
rename to doc/contributing.rst
index 13eb809..d41c999 100644
--- a/doc/source/contributing.rst
+++ b/doc/contributing.rst
@@ -1,8 +1,16 @@
-.. _using-git:
+.. _contributing:
 
+Contributing to MNE project
+===========================
 
-Contributing to *mne-python* source code
-========================================
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+.. We want to thank all MNE Software users at the Martinos Center and
+.. in other institutions for their collaboration during the creation
+.. of this software as well as for useful comments on the software
+.. and its documentation.
 
 We are open to all types of contributions, from bugfixes to functionality
 enhancements. mne-python_ is meant to be maintained by a community of labs,
@@ -17,23 +25,26 @@ coding is done saves everyone time and effort!
 What you will need
 ------------------
 
-#. A Unix (Linux or Mac OS) box: `MNE command line utilities`_ and Freesurfer_
-   that are required to make the best out of this toolbox require a Unix platform.
-
-#. A good python editor: Spyder_ IDE is suitable for those migrating from
-   Matlab. EPD_ and Anaconda_ both ship Spyder and all its dependencies. For
-   Mac users, TextMate_ and `Sublime Text`_ are good choices. `Sublime Text`_
-   is available on all three major platforms.
+#. A good python editor: Atom_ and `Sublime Text`_ are modern general-purpose
+   text editors and are available on all three major platforms. Both provide
+   plugins that facilitate editing python code and help avoid bugs and style
+   errors. See for example linterflake8_ for Atom_. The Spyder_ IDE is
+   especially suitable for those migrating from Matlab. EPD_ and Anaconda_
+   both ship Spyder and all its dependencies. As always, Vim or Emacs will
+   suffice as well.
 
 #. Basic scientific tools in python: numpy_, scipy_, matplotlib_
 
-#. Development related tools: nosetests_, coverage_, mayavi_, sphinx_,
+#. Development related tools: nosetests_, coverage_, nose-timer_, mayavi_, sphinx_,
    pep8_, and pyflakes_
 
 #. Other useful packages: pysurfer_, nitime_, pandas_, PIL_, PyDICOM_,
-   joblib_, nibabel_, and scikit-learn_
+   joblib_, nibabel_, h5py_, and scikit-learn_
 
-#. External tools: `MNE command line utilities`_, Freesurfer_, and `mne-scripts`_
+#. `MNE command line utilities`_ and Freesurfer_ are optional but will allow
+   you to get the most out of MNE. Note that they require a Unix (Linux or
+   Mac OS) system. If you are on Windows, you can install these applications
+   inside a Unix virtual machine.
 
 General code guidelines
 -----------------------
@@ -56,13 +67,16 @@ General code guidelines
 
 * mne-python adheres to the same docstring formatting as seen on
   `numpy style`_.
-  New public functions should have all variables defined.
+  New public functions should have all variables defined. The test suite
+  has some functionality that checks docstrings, but docstrings should
+  still be checked for clarity, uniformity, and completeness.
 
 * New functionality should be covered by appropriate tests, e.g. a method in
-  ``mne/fiff/raw.py`` should have a corresponding test in
-  ``mne/fiff/tests/test_raw.py``. You can use the ``coverage`` module in
+  ``mne/evoked.py`` should have a corresponding test in
+  ``mne/tests/test_evoked.py``. You can use the ``coverage`` module in
   conjunction with ``nosetests`` (nose can automatically determine the code
   coverage if ``coverage`` is installed) to see how well new code is covered.
+  We aim for around 85% test coverage.
 
 * After changes have been made, **ensure all tests pass**. This can be done
   by running the following from the ``mne-python`` root directory::
@@ -77,13 +91,69 @@ General code guidelines
      make inplace
      nosetests
 
-  Note that the first time this is run, the `mne-python sample dataset`_
-  (~1.2 GB) will be downloaded to the root directory and extracted. This is
-  necessary for running some of the tests and nearly all of the examples.
+  To explicitly download and extract the mne-python testing dataset (~320 MB)
+  run::
+
+     make testing_data
+
+  Alternatively::
+
+     python -c "import mne; mne.datasets.testing.data_path(verbose=True)"
+
+  downloads the test data as well. Having a complete testing dataset is
+  necessary for running the tests. To run the examples you'll need
+  the `mne-python sample dataset`_ which is automatically downloaded
+  when running an example for the first time.
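+
+  The sample dataset can likewise be fetched ahead of time (assuming the
+  analogous ``data_path`` call)::
+
+     python -c "import mne; mne.datasets.sample.data_path(verbose=True)"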
 
   You can also run ``nosetests -x`` to have nose stop as soon as a failed
-  test is found, or run e.g., ``nosetests mne/fiff/tests/test_raw.py`` to run
-  a specific test.
+  test is found, or run e.g., ``nosetests mne/tests/test_event.py`` to run
+  a specific test module. In addition, the tests of a module can be run from
+  within python::
+
+     >>> from mne.utils import run_tests_if_main
+     >>> run_tests_if_main()
+
+  For more details see troubleshooting_.
+
+* Update the relevant documentation. Update :doc:`whats_new.rst <whats_new>`
+  for new features and :doc:`python_reference.rst <python_reference>` for new
+  classes and standalone functions. :doc:`whats_new.rst <whats_new>` is
+  organized chronologically, with the most recent feature at the end of the
+  document.
+
+  To ensure that these files render correctly, run the following command::
+
+     make html-noplot
+
+  This will build the docs without building all the examples, which can save
+  some time.
+
+
+More mne-python specific guidelines
+-----------------------------------
+
+* Please address one and only one issue per pull request (PR).
+* Avoid unnecessary cosmetic changes that are not the goal of the PR; this
+  keeps the diff clean and makes reviewing easier.
+* Use underscores to separate words in non-class names: ``n_samples`` rather
+  than ``nsamples``.
+* Use CamelCase for class names.
+* Use relative imports for references inside mne-python.
+* Use nested imports for ``matplotlib``, ``sklearn``, and ``pandas``.
+* Use ``RdBu_r`` colormap for signed data and ``Reds`` for unsigned data in visualization functions and examples.
+* All visualization functions must accept a ``show`` parameter and return a ``fig`` handle.
+* Efforts to improve test timing without decreasing coverage are much
+  appreciated. To see the top 30 tests in order of decreasing run time, run
+  the following command::
+
+    nosetests --with-timer --timer-top-n 30
+
+* Instance methods that update the state of the object should return ``self``
+  (see the sketch after this list).
+* Use single quotes whenever possible.
+* Prefer generator or list comprehensions over ``filter``, ``map`` and other functional idioms.
+* Use explicit functional constructors for builtin containers to improve
+  readability, e.g. ``list()`` and ``dict()``.
+* Avoid nested functions if not necessary and use private functions instead.
+* When adding visualization methods, add public functions to the mne.viz package and use these in the corresponding method.
+* If not otherwise required, methods should deal with state while functions
+  should return copies. There are a few justified exceptions, e.g.
+  ``equalize_channels``, for memory reasons.
+* Update the whats_new.rst file at the end, otherwise merge conflicts are guaranteed to occur.
+* Avoid ``**kwargs`` and ``*args`` in function signatures; they are not user
+  friendly since they hinder introspection.
+* Avoid single-character variable names if you can; they are hard to read
+  and often clash with commands of the builtin debugger.
+* Add at least a brief comment to private functions to help us guess what
+  they do. For complex private functions, please write full documentation.
+
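+A minimal sketch illustrating a few of these conventions (the class and
+function names are hypothetical, not part of mne-python)::
+
+    class Widget(object):
+        """A toy object with chainable state updates."""
+
+        def __init__(self, data):
+            self.data = data
+
+        def pick_channels(self, picks):
+            """Update the state in place and return self (chainable)."""
+            self.data = [self.data[p] for p in picks]
+            return self
+
+
+    def plot_widget(widget, show=True):
+        """Plot data; accept ``show`` and return the figure handle."""
+        import matplotlib.pyplot as plt  # nested import, per the guidelines
+        fig, ax = plt.subplots()
+        ax.plot(widget.data)
+        if show:
+            plt.show()
+        return fig
+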
+Profiling in Python
+-------------------
+
+To learn more about profiling python code please see `the scikit-learn
+profiling documentation
+<http://scikit-learn.org/stable/developers/performance.html#performance-howto>`_.
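+
+As a starting point, the standard library profiler already goes a long way;
+a minimal sketch (``my_analysis`` is a placeholder for whatever call you
+want to profile)::
+
+    import cProfile
+    import pstats
+
+    cProfile.run('my_analysis()', 'profile.out')  # run and record stats
+    stats = pstats.Stats('profile.out')
+    stats.sort_stats('cumulative').print_stats(20)  # 20 heaviest call paths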
 
 Configuring git
 ---------------
@@ -102,13 +172,17 @@ your contact info::
 
 If you are going to :ref:`setup-github` eventually, this email address should
 be the same as the one used to sign up for a GitHub account. For more
-information about configuring your git installation, see
-:ref:`customizing-git`.
+information about configuring your git installation, see:
+
+.. toctree::
+   :maxdepth: 1
+
+   customizing_git
 
 The following sections cover the installation of the git software, the basic
 configuration, and links to resources to learn more about using git.
 However, you can also directly go to the `GitHub help pages
-<http://help.github.com/>`_ which offer a great introduction to git and
+<https://help.github.com/>`_ which offer a great introduction to git and
 GitHub.
 
 In the present document, we refer to the mne-python ``master`` branch, as the
@@ -121,7 +195,7 @@ Creating a fork
 
 You need to do this only once for each package you want to contribute to. The
 instructions here are very similar to the instructions at
-http://help.github.com/fork-a-repo/ |emdash| please see that page for more
+https://help.github.com/fork-a-repo/ |emdash| please see that page for more
 details. We're repeating some of it here just to give the specifics for the
 mne-python_ project, and to suggest some default names.
 
@@ -167,51 +241,6 @@ These steps can be broken out to be more explicit as:
 
     git clone git at github.com:your-user-name/mne-python.git
 
-#. Create a symbolic link to your mne directory::
-
-   To find the directory in which python packages are installed, go to python
-   and type::
-
-    import site; site.getsitepackages()
-
-   This gives two directories::
-
-    ['/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages']
-
-   When you write examples and import the MNE modules, this is where python
-   searches and imports them from. If you want to avoid installing the
-   package again when you make changes in your source code, it is better to
-   create a symbolic link from the installation directory to the ``mne/``
-   folder containing your source code.
-
-   First, check if there are any ``mne`` or ``mne-*.egg-info`` files in
-   these directories and delete them. Then, find the user directory for
-   installing python packages::
-
-    import site; site.getusersitepackages()
-
-   This might give for instance::
-
-    '~/.local/lib/python2.7/site-packages'
-
-   Then, make a symbolic link to your working directory::
-
-    ln -s <path to mne-python>/mne ~/.local/lib/python2.7/site-packages/mne
-
-   Also for the mne-python scripts::
-   
-    ln -s <path to mne-python>/bin/mne /usr/local/bin/mne
-
-   Since you make a symbolic link to the local directory, you won't require
-   root access while editing the files and the changes in your working
-   directory are automatically reflected in the installation directory. To
-   verify that it works, go to a directory other than the installation
-   directory, run ipython, and then type ``import mne; print mne.__path__``.
-   This will show you from where it imported MNE-Python.
-
-   Now, whenever you make any changes to the code, just restart the
-   ipython kernel for the changes to take effect.
-
 #. Change directory to your new repo::
 
     cd mne-python
@@ -247,20 +276,30 @@ These steps can be broken out to be more explicit as:
    Just for your own satisfaction, show yourself that you now have a new
    'remote', with ``git remote -v show``, giving you something like::
 
-    upstream   git://github.com/mne-tools/mne-python.git (fetch)
-    upstream   git://github.com/mne-tools/mne-python.git (push)
-    origin     git at github.com:your-user-name/mne-python.git (fetch)
-    origin     git at github.com:your-user-name/mne-python.git (push)
+       upstream   git://github.com/mne-tools/mne-python.git (fetch)
+       upstream   git://github.com/mne-tools/mne-python.git (push)
+       origin     git at github.com:your-user-name/mne-python.git (fetch)
+       origin     git at github.com:your-user-name/mne-python.git (push)
 
    Your fork is now set up correctly.
 
+#. Install mne in development mode:
+
+   To be able to conveniently edit your files after installing mne-python,
+   install using the following command::
+
+       $ python setup.py develop --user
+
+   To make changes in the code, edit the relevant files and restart the
+   ipython kernel for changes to take effect.
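+
+   To verify that the development install is picked up, run the following
+   from a directory other than your source tree::
+
+       $ python -c "import mne; print(mne.__path__)"
+
+   This should print the path to your working copy.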
+
 #. Ensure unit tests pass and html files can be compiled
 
    Make sure before starting to code that all unit tests pass and the
    html files in the ``doc/`` directory can be built without errors. To build
    the html files, first go to the ``doc/`` directory and then type::
 
-    make html
+       $ make html
 
    Once it is compiled for the first time, subsequent compiles will only
    recompile what has changed. That's it! You are now ready to hack away.
@@ -288,7 +327,7 @@ sections.
 
 * If you do find yourself merging from the trunk, consider :ref:`rebase-on-trunk`
 
-* **Ensure all tests still pass**
+* **Ensure all tests still pass**. Make `travis`_ happy.
 
 * Ask for code review!
 
@@ -427,7 +466,7 @@ Asking for your changes to be reviewed or merged
 When you are ready to ask for someone to review your code and consider a merge:
 
 #. Go to the URL of your forked repo, say
-   ``http://github.com/your-user-name/mne-python``.
+   ``https://github.com/your-user-name/mne-python``.
 
 #. Use the 'Switch Branches' dropdown menu near the top left of the page to
    select the branch with your changes:
@@ -450,6 +489,11 @@ When you are ready to ask for someone to review your code and consider a merge:
    how the code should be written (features, style, etc.) that are easier to
    implement from the start.
 
+#. Finally, make `travis`_ happy. Ensure that builds in all four jobs pass.
+   To make code Python 3 compatible, refer to ``externals/six.py``. Use
+   virtual environments to test code on different python versions (see the
+   sketch below). Please remember that `travis`_ only runs a subset of the
+   tests and is thus not a substitute for running the entire test suite
+   locally.
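+
+   For example, a throwaway environment for another Python version (a
+   sketch using virtualenv; adjust the interpreter path to your system)::
+
+       virtualenv -p /usr/bin/python3.4 venv34
+       . venv34/bin/activate
+       pip install numpy scipy nose
+       python setup.py develop
+       nosetests mne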
+
+#. For the code to be mergeable, please rebase onto the ``master`` branch.
+
+
 If you are uncertain about what would or would not be appropriate to contribute
 to mne-python, don't hesitate to either send a pull request, or open an issue
 on the mne-python_ GitHub site to discuss potential changes.
@@ -472,7 +516,7 @@ Delete a branch on GitHub
    git push origin :my-unwanted-branch
 
 (Note the colon ``:`` before ``my-unwanted-branch``. See also:
-http://github.com/guides/remove-a-remote-branch
+https://help.github.com/remotes)
 
 Several people sharing a single repository
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -752,6 +796,8 @@ Editing \*.rst files
 These are reStructuredText files. Consult the Sphinx documentation to learn
 more about editing them.
 
+.. _troubleshooting:
+
 Troubleshooting
 ---------------
 
@@ -771,4 +817,12 @@ Cannot import class from a new \*.py file
 You need to update the corresponding ``__init__.py`` file and then
 restart the ipython kernel.
 
+ICE default IO error handler doing an exit()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the ``make test`` command fails with the error ``ICE default IO error
+handler doing an exit()``, try backing up or removing ``.ICEauthority``::
+
+    mv ~/.ICEauthority ~/.ICEauthority.bak
+
 .. include:: links.inc
diff --git a/doc/source/customizing_git.rst b/doc/customizing_git.rst
similarity index 98%
rename from doc/source/customizing_git.rst
rename to doc/customizing_git.rst
index 3e44b63..e3ed1ed 100644
--- a/doc/source/customizing_git.rst
+++ b/doc/customizing_git.rst
@@ -4,6 +4,10 @@
  Customizing git
 =================
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
 .. _git-config-basic:
 
 Overview
diff --git a/doc/faq.rst b/doc/faq.rst
new file mode 100644
index 0000000..9caf70f
--- /dev/null
+++ b/doc/faq.rst
@@ -0,0 +1,75 @@
+.. _faq:
+
+==========================
+Frequently Asked Questions
+==========================
+
+.. contents:: Contents
+   :local:
+
+
+Inverse Solution
+================
+
+How should I regularize the covariance matrix?
+----------------------------------------------
+
+The estimated covariance can be numerically unstable, and it tends to induce
+a dependence of the estimated source amplitudes on the number of samples
+available. The MNE manual therefore suggests regularizing the noise
+covariance matrix (see :ref:`CBBHEGAB`), especially if only few samples are
+available. Unfortunately it is not easy to tell the effective number of
+samples, and hence to choose the appropriate regularization. In MNE-Python,
+regularization is done using advanced regularization methods described in
+[1]_. For this, the ``'auto'`` option can be used: with this option,
+cross-validation is used to learn the optimal regularization::
+
+    >>> import mne
+    >>> epochs = mne.read_epochs(epochs_path) # doctest: +SKIP
+    >>> cov = mne.compute_covariance(epochs, tmax=0., method='auto') # doctest: +SKIP
+
+This procedure quantitatively evaluates the noise covariance by how well it
+whitens the data, using the negative log-likelihood of unseen data. The
+final result can also be inspected visually. Under the assumption that the
+baseline does not contain a systematic signal (time-locked to the event of
+interest), the whitened baseline signal should follow a multivariate
+standard Gaussian distribution, i.e., whitened baseline signals should
+mostly lie between -1.96 and 1.96 at a given time sample.
+Based on the same reasoning, the expected value for the global field power (GFP)
+is 1 (calculation of the GFP should take into account the true degrees of
+freedom, e.g. ``ddof=3`` with 2 active SSP vectors)::
+
+    >>> evoked = epochs.average() # doctest: +SKIP
+    >>> evoked.plot_white(cov) # doctest: +SKIP
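+
+As a rough numerical sketch of the GFP reasoning above (``white`` is a
+hypothetical whitened data array of shape (n_channels, n_times))::
+
+    >>> import numpy as np
+    >>> ddof = 3  # e.g. with 2 active SSP vectors, as noted above
+    >>> gfp = np.sum(white ** 2, axis=0) / (white.shape[0] - ddof)  # doctest: +SKIP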
+
+This plot displays both the whitened evoked signals for each channel and
+the whitened GFP. The numbers in the GFP panel represent the estimated rank
+of the data, which amounts to the effective degrees of freedom by which the
+squared sum across sensors is divided when computing the whitened GFP.
+The whitened GFP also helps in detecting spurious late evoked components,
+which can be the consequence of over- or under-regularization.
+
+Note that if data have been processed using signal space separation (SSS) [2]_,
+gradiometers and magnetometers will be displayed jointly because both are
+reconstructed from the same SSS basis vectors with the same numerical rank.
+This also implies that the two sensor types are no longer linearly independent.
+
+These methods for evaluation can be used to assess model violations.
+Additional introductory materials can be found `here
+<https://speakerdeck.com/dengemann/eeg-sensor-covariance-using-cross-validation>`_.
+
+For expert use cases or debugging, the alternative estimators can also be
+compared::
+
+    >>> covs = mne.compute_covariance(epochs, tmax=0., method='auto', return_estimators=True) # doctest: +SKIP
+    >>> evoked = epochs.average() # doctest: +SKIP
+    >>> evoked.plot_white(covs) # doctest: +SKIP
+
+This will plot the whitened evoked response for the optimal estimator and
+display the GFPs of all estimators as separate lines in the related panel.
+
+References
+----------
+
+.. [1] Engemann D. and Gramfort A. (2015). Automated model selection in
+    covariance estimation and spatial whitening of MEG and EEG signals.
+    NeuroImage, 108, 328-342.
+
+.. [2] Taulu, S., Simola, J., Kajola, M., 2005. Applications of the signal space
+    separation method. IEEE Trans. Signal Proc. 53, 3359–3372.
diff --git a/doc/getting_started.rst b/doc/getting_started.rst
new file mode 100644
index 0000000..9bcbe34
--- /dev/null
+++ b/doc/getting_started.rst
@@ -0,0 +1,321 @@
+.. _getting_started:
+
+Getting Started
+===============
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+.. XXX do a Getting for both C and Python
+
+MNE is an academic software package that aims to provide data analysis
+pipelines encompassing all phases of M/EEG data processing.
+It consists of two subpackages which are fully integrated
+and compatible: the original MNE-C (distributed as compiled C code)
+and MNE-Python. A basic :ref:`ch_matlab` is also available, mostly
+to allow reading and writing MNE files. For source localization
+the software depends on anatomical MRI processing tools provided
+by the `FreeSurfer`_ software.
+
+.. _FreeSurfer: http://surfer.nmr.mgh.harvard.edu
+
+Downloading and installing the Unix commands
+--------------------------------------------
+
+.. note::
+
+    If you are working at the Martinos Center see :ref:`setup_martinos`
+    for instructions to work with MNE and to access the Neuromag software.
+
+The MNE Unix commands can be downloaded at:
+
+* `Download <http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php>`_ MNE
+
+:ref:`c_reference` gives an overview of the command line
+tools provided with MNE.
+
+System requirements
+###################
+
+The MNE Unix commands run on Mac OSX and LINUX operating systems.
+The hardware and software requirements are:
+
+- Mac OSX version 10.5 (Leopard) or later.
+
+- LINUX kernel 2.6.9 or later
+
+- On both LINUX and Mac OSX, 32-bit and 64-bit Intel platforms
+  are supported. A PowerPC version on Mac OSX can be provided upon request.
+
+- At least 2 GB of memory, 4 GB or more recommended.
+
+- Disk space required for the MNE software: 80 MB
+
+- Additional open source software on Mac OSX, see :ref:`BABDBCJE`.
+
+Installation
+############
+
+The MNE software is distributed as a compressed tar archive
+(Mac OSX and LINUX) or a Mac OSX disk image (dmg).
+
+The file names follow the convention:
+
+``MNE-<version>-<rev>-<Operating system>-<Processor>.<ext>``
+
+The present version number is 2.7.0. The <*rev*> field
+is the SVN revision number at the time this package was created.
+The <*Operating system*> field
+is either Linux or MacOSX. The <*processor*> field
+is either i386 or x86_64. The <*ext*> field
+is 'gz' for compressed tar archive files and 'dmg' for
+Mac OSX disk images.
+
+Installing from a compressed tar archive
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Go to the directory where you want the software to be installed:
+
+``cd`` <*dir*>
+
+Unpack the tar archive:
+
+``tar zxvf`` <*software package*>
+
+The name of the software directory under <*dir*> will
+be the same as the package file without the .gz extension.
+
+Installing from a Mac OSX disk image
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Double click on the disk image file.
+  A window opens with the installer package ( <*name*> .pkg)
+  inside.
+
+- Double click the package file. The installer starts.
+
+- Follow the instructions in the installer.
+
+.. note::
+
+    The software will be installed to /Applications/ <*name*> by default.
+    If you want another location, select Choose Folder... on the Select a
+    Destination screen in the installer.
+
+.. _user_environment:
+
+Setting up MNE Unix commands environment
+########################################
+
+The system-dependent location of the MNE Software will be
+here referred to by the environment variable MNE_ROOT. There are
+two scripts for setting up the user environment so that the software
+can be used conveniently:
+
+``$MNE_ROOT/bin/mne_setup_sh``
+
+and
+
+``$MNE_ROOT/bin/mne_setup``
+
+compatible with the POSIX and csh/tcsh shells, respectively. Since
+the scripts set environment variables, they should be 'sourced' in
+the present shell. You can find out which type of shell you are using
+by saying
+
+``echo $SHELL``
+
+If the output indicates a POSIX shell (bash or sh) you should issue
+the three commands::
+
+    export MNE_ROOT=<MNE>
+    export MATLAB_ROOT=<Matlab>
+    . $MNE_ROOT/bin/mne_setup_sh
+
+with <*MNE*> replaced
+by the directory where you have installed the MNE software and <*Matlab*> is
+the directory where Matlab is installed. If you do not have Matlab,
+leave MATLAB_ROOT undefined. If Matlab is not available, the utilities
+mne_convert_mne_data, mne_epochs2mat, mne_raw2mat,
+and mne_simu will not work.
+
+For csh/tcsh the corresponding commands are::
+
+    setenv MNE_ROOT <MNE>
+    setenv MATLAB_ROOT <Matlab>
+    source $MNE_ROOT/bin/mne_setup
+
+For BEM mesh generation using the watershed algorithm or on the basis of
+multi-echo FLASH MRI data (see :ref:`create_bem_model`), and for accessing
+the tkmedit program from mne_analyze (see :ref:`CACCHCBF`), the MNE software
+needs access to a FreeSurfer license and software. Therefore, to use these
+features it is mandatory that you set up the FreeSurfer environment
+as described in the FreeSurfer documentation.
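+
+Typically (assuming the standard FreeSurfer setup script and a bash-style
+shell) this amounts to something like::
+
+    export FREESURFER_HOME=<FreeSurfer directory>
+    source $FREESURFER_HOME/SetUpFreeSurfer.sh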
+
+The environment variables relevant to the MNE software are
+listed in :ref:`CIHDGFAA`.
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.55\linewidth}|
+.. _CIHDGFAA:
+.. table:: Environment variables
+
+    +-------------------------+--------------------------------------------+
+    | Name of the variable    |   Description                              |
+    +=========================+============================================+
+    | MNE_ROOT                | Location of the MNE software, see above.   |
+    +-------------------------+--------------------------------------------+
+    | FREESURFER_HOME         | Location of the FreeSurfer software.       |
+    |                         | Needed during FreeSurfer reconstruction    |
+    |                         | and if the FreeSurfer MRI viewer is used   |
+    |                         | with mne_analyze, see :ref:`CACCHCBF`.     |
+    +-------------------------+--------------------------------------------+
+    | SUBJECTS_DIR            | Location of the MRI data.                  |
+    +-------------------------+--------------------------------------------+
+    | SUBJECT                 | Name of the current subject.               |
+    +-------------------------+--------------------------------------------+
+    | MNE_TRIGGER_CH_NAME     | Name of the trigger channel in raw data,   |
+    |                         | see :ref:`mne_process_raw`.                |
+    +-------------------------+--------------------------------------------+
+    | MNE_TRIGGER_CH_MASK     | Mask to be applied to the trigger channel  |
+    |                         | values, see :ref:`mne_process_raw`.        |
+    +-------------------------+--------------------------------------------+
+
+.. _BABDBCJE:
+
+Additional software
+###################
+
+MNE uses the 'Netpbm' package (http://netpbm.sourceforge.net/)
+to create image files in formats other than tif and rgb from mne_analyze and mne_browse_raw.
+This package is usually present on LINUX systems. On Mac OSX, you
+need to install the netpbm package. The recommended way to do this
+is to use the MacPorts Project tools, see http://www.macports.org/:
+
+- If you have not installed the MacPorts
+  software, go to http://www.macports.org/install.php and follow the
+  instructions to install MacPorts.
+
+- Install the netpbm package by saying: ``sudo port install netpbm``
+
+MacPorts requires that you have the XCode developer tools
+and X11 windowing environment installed. X11 is also needed by MNE.
+For Mac OSX Leopard, we recommend using XQuartz (http://xquartz.macosforge.org/).
+As of this writing, XQuartz does not yet exist for Snow Leopard;
+the X11 included with the operating system is sufficient.
+
+.. _CIHIIBDA:
+
+Testing the performance of your OpenGL graphics
+###############################################
+
+The graphics performance of mne_analyze depends
+on your graphics software and hardware configuration. You get the
+best performance if you are using mne_analyze locally
+on a computer and the hardware acceleration capabilities are in
+use. You can check the "On GLX..." item in the help menu of mne_analyze to
+see whether the hardware acceleration is in effect. If the dialog popping up
+says "Direct rendering context", you are using hardware acceleration. If
+this dialog indicates "Nondirect rendering context", you are either using
+software emulation locally, rendering to a remote display, or employing a
+VNC connection. If you are rendering to a local display and get an
+indication of "Nondirect rendering context", software emulation is in
+effect and you should contact your local computer support to enable hardware
+acceleration for GLX. In some cases, this may require acquiring a new
+graphics display card. Fortunately, relatively high-performance
+OpenGL-capable graphics cards are very inexpensive.
+
+There is also a utility mne_opengl_test to
+assess the graphics performance more quantitatively. This utility
+renders an inflated brain surface repeatedly, rotating it by 5 degrees
+around the *z* axis between redraws. At each
+revolution, the time spent for the full revolution is reported on
+the terminal window where mne_opengl_test was
+started from. The program renders the surface until the interrupt
+key (usually control-c) is pressed on the terminal window.
+
+mne_opengl_test is located
+in the ``bin`` directory and is thus started as:
+
+``$MNE_ROOT/bin/mne_opengl_test``
+
+On the fastest graphics cards, the time per revolution is
+well below 1 second. If this time is longer than 10 seconds, either
+the graphics hardware acceleration is not in effect or you need
+a faster graphics adapter.
+
+Obtain FreeSurfer
+#################
+
+The MNE software relies on the FreeSurfer software for cortical
+surface reconstruction and other MRI-related tasks. Please consult
+the FreeSurfer home page at ``http://surfer.nmr.mgh.harvard.edu/``.
+
+
+Downloading and installing MNE-Python
+-------------------------------------
+
+.. note::
+
+    If you are at the Martinos Center, please see :ref:`inside_martinos`.
+
+New to the Python programming language?
+#######################################
+
+This is a very good place to get started: http://scipy-lectures.github.io.
+
+Installing the Python interpreter
+#################################
+
+For a fast and up-to-date scientific Python environment that resolves all
+dependencies, we recommend the Anaconda Python distribution:
+
+https://store.continuum.io/cshop/anaconda/
+
+Anaconda is free for academic purposes.
+
+To test that everything works properly, open up IPython::
+
+    ipython --pylab qt
+
+Now that you have a working Python environment you can install MNE-Python.
+
+mne-python installation
+#######################
+
+Most users should start with the "stable" version of mne-python, which can
+be installed this way::
+
+    pip install mne --upgrade
+
+For the newest features (and potentially more bugs), you can instead install
+the development version with::
+
+    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
+
+If you plan to contribute to the project, please follow the git instructions
+in :ref:`contributing`.
+
+If you would like to use a custom installation of python (or have specific
+questions about integrating special tools like IPython notebooks), please
+see :ref:`detailed_notes`.
+
+Checking your installation
+##########################
+
+To check that everything went fine, type the following in ipython::
+
+    >>> import mne
+
+If you get a new prompt with no error messages, you should be good to go!
+Consider reading the :ref:`detailed_notes` for more advanced options and
+speed-related enhancements.
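+
+You can also print the installed version to check that you got the release
+you expected::
+
+    >>> print(mne.__version__)  # doctest: +SKIP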
+
+Going beyond
+------------
+
+Now you're ready to read our:
+
+  * :ref:`tutorials`
+  * `Examples <auto_examples/index.html>`_
+  * :ref:`manual`
diff --git a/doc/source/git_links.inc b/doc/git_links.inc
similarity index 64%
rename from doc/source/git_links.inc
rename to doc/git_links.inc
index 15a0c02..617072d 100644
--- a/doc/source/git_links.inc
+++ b/doc/git_links.inc
@@ -10,8 +10,8 @@
 
 .. git stuff
 .. _git: http://git-scm.com/
-.. _github: http://github.com
-.. _GitHub Help: http://help.github.com
+.. _github: https://github.com
+.. _GitHub Help: https://help.github.com
 .. _msysgit: http://code.google.com/p/msysgit/downloads/list
 .. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
 .. _subversion: http://subversion.tigris.org/
@@ -19,9 +19,9 @@
 .. _pro git book: http://progit.org/
 .. _git svn crash course: http://git-scm.com/course/svn.html
 .. _learn.github: http://learn.github.com/
-.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
-.. _git user manual: http://schacon.github.com/git/user-manual.html
-.. _git tutorial: http://schacon.github.com/git/gittutorial.html
+.. _network graph visualizer: https://github.com/blog/39-say-hello-to-the-network-graph-visualizer
+.. _git user manual: https://schacon.github.io/git/user-manual.html
+.. _git tutorial: https://schacon.github.io/git/gittutorial.html
 .. _git community book: http://book.git-scm.com/
 .. _git ready: http://www.gitready.com/
 .. _git casts: http://www.gitcasts.com/
@@ -29,37 +29,40 @@
 .. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
 .. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
 .. _git clone: http://schacon.github.com/git/git-clone.html
-.. _git checkout: http://schacon.github.com/git/git-checkout.html
-.. _git commit: http://schacon.github.com/git/git-commit.html
-.. _git push: http://schacon.github.com/git/git-push.html
-.. _git pull: http://schacon.github.com/git/git-pull.html
-.. _git add: http://schacon.github.com/git/git-add.html
-.. _git status: http://schacon.github.com/git/git-status.html
-.. _git diff: http://schacon.github.com/git/git-diff.html
-.. _git log: http://schacon.github.com/git/git-log.html
-.. _git branch: http://schacon.github.com/git/git-branch.html
-.. _git remote: http://schacon.github.com/git/git-remote.html
-.. _git rebase: http://schacon.github.com/git/git-rebase.html
-.. _git config: http://schacon.github.com/git/git-config.html
+.. _git checkout: https://schacon.github.io/git/git-checkout.html
+.. _git commit: https://schacon.github.io/git/git-commit.html
+.. _git push: https://schacon.github.io/git/git-push.html
+.. _git pull: https://schacon.github.io/git/git-pull.html
+.. _git add: https://schacon.github.io/git/git-add.html
+.. _git status: https://schacon.github.io/git/git-status.html
+.. _git diff: https://schacon.github.io/git/git-diff.html
+.. _git log: https://schacon.github.io/git/git-log.html
+.. _git branch: https://schacon.github.io/git/git-branch.html
+.. _git remote: https://schacon.github.io/git/git-remote.html
+.. _git rebase: https://schacon.github.io/git/git-rebase.html
+.. _git config: https://schacon.github.io/git/git-config.html
 .. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
 .. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
 .. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git
 .. _git management: http://kerneltrap.org/Linux/Git_Management
 .. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
 .. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
-.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html
-.. _deleting master on github: http://matthew-brett.github.com/pydagogue/gh_delete_master.html
-.. _rebase without tears: http://matthew-brett.github.com/pydagogue/rebase_without_tears.html
-.. _resolving a merge: http://schacon.github.com/git/user-manual.html#resolving-a-merge
+.. _git foundation: http://matthew-brett.github.io/pydagogue/foundation.html
+.. _deleting master on github: http://matthew-brett.github.io/pydagogue/gh_delete_master.html
+.. _rebase without tears: http://matthew-brett.github.io/pydagogue/rebase_without_tears.html
+.. _resolving a merge: https://schacon.github.io/git/user-manual.html#resolving-a-merge
 .. _ipython git workflow: http://mail.scipy.org/pipermail/ipython-dev/2010-October/006746.html
 
 .. other stuff
 .. _python: http://www.python.org
+.. _travis: https://travis-ci.org/
+.. _linterflake8: https://atom.io/packages/linter-flake8
 
 .. python packages
-.. _pep8: http://pypi.python.org/pypi/pep8
-.. _pyflakes: http://pypi.python.org/pypi/pyflakes
+.. _pep8: https://pypi.python.org/pypi/pep8
+.. _pyflakes: https://pypi.python.org/pypi/pyflakes
 .. _coverage: https://pypi.python.org/pypi/coverage
+.. _nose-timer: https://pypi.python.org/pypi/nose-timer
 .. _nosetests: https://nose.readthedocs.org/en/latest/
 .. _mayavi: http://mayavi.sourceforge.net/
 .. _nitime: http://nipy.org/nitime/
@@ -73,10 +76,10 @@
 .. _PIL: https://pypi.python.org/pypi/PIL
 
 .. python editors
+.. _atom: https://atom.io/
 .. _spyder: http://spyder-ide.blogspot.com/
 .. _anaconda: http://www.continuum.io/downloads
 .. _EPD: https://www.enthought.com/products/epd/
-.. _textmate: http://macromates.com/
 .. _sublime text: http://www.sublimetext.com/
 
 .. mne stuff
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..3553bd9
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,137 @@
+.. title:: Home
+
+.. raw:: html
+
+    <div class="container"><div class="row">
+    <div class="col-md-8"><div style="text-align: center; height: 270px">
+    <span style="display: inline-block; height: 100%; vertical-align: middle"></span>
+    <a href="index.html"><img src="_static/mne_logo.png" border="0" alt="MNE" style="vertical-align: middle"></a>
+    </div></div>
+    <div class="col-md-4"><div style="float: left">
+    <a href="index.html"><img src="_static/institutions.png"" border="0" alt="Institutions"/></a>
+    </div></div>
+    </div></div>
+
+.. raw:: html
+
+   <div class="container-fluid">
+   <div class="row">
+   <div class="col-md-8">
+   <br>
+
+MNE is a community-driven software package designed for **processing
+electroencephalography (EEG) and magnetoencephalography (MEG) data**,
+providing comprehensive tools and workflows for:
+
+1. Preprocessing
+2. Source estimation
+3. Time–frequency analysis
+4. Statistical testing
+5. Estimation of functional connectivity
+6. Applying machine learning algorithms
+7. Visualization of sensor- and source-space data
+
+MNE includes a comprehensive Python package (provided under the simplified
+BSD license), supplemented by tools compiled from C code for the LINUX and
+Mac OSX operating systems, as well as a MATLAB toolbox.
+
+**From raw data to source estimates in about 30 lines of code:**
+
+.. code:: python
+
+    >>> import mne  # doctest: +SKIP
+    >>> raw = mne.io.Raw('raw.fif', preload=True)  # load data  # doctest: +SKIP
+    >>> raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels  # doctest: +SKIP
+    >>> raw.filter(l_freq=None, h_freq=40.0)  # low-pass filter data  # doctest: +SKIP
+    >>> # Extract epochs and save them:
+    >>> picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True,  # doctest: +SKIP
+    ...                        exclude='bads')  # doctest: +SKIP
+    >>> events = mne.find_events(raw)  # doctest: +SKIP
+    >>> reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)  # doctest: +SKIP
+    >>> epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,  # doctest: +SKIP
+    ...                     proj=True, picks=picks, baseline=(None, 0),  # doctest: +SKIP
+    ...                     preload=True, reject=reject)  # doctest: +SKIP
+    >>> # Compute evoked response and noise covariance
+    >>> evoked = epochs.average()  # doctest: +SKIP
+    >>> cov = mne.compute_covariance(epochs, tmax=0)  # doctest: +SKIP
+    >>> evoked.plot()  # plot evoked  # doctest: +SKIP
+    >>> # Compute inverse operator:
+    >>> fwd_fname = 'sample_audvis-meg-eeg-oct-6-fwd.fif'  # doctest: +SKIP
+    >>> fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)  # doctest: +SKIP
+    >>> inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd,  # doctest: +SKIP
+    ...                                              cov, loose=0.2)  # doctest: +SKIP
+    >>> # Compute inverse solution:
+    >>> stc = mne.minimum_norm.apply_inverse(evoked, inv, lambda2=1./9.,  # doctest: +SKIP
+    ...                                      method='dSPM')  # doctest: +SKIP
+    >>> # Morph it to average brain for group study and plot it
+    >>> stc_avg = mne.morph_data('sample', 'fsaverage', stc, 5, smooth=5)  # doctest: +SKIP
+    >>> stc_avg.plot()  # doctest: +SKIP
+
+The MNE development is supported by National Institute of Biomedical Imaging
+and Bioengineering grants 5R01EB009048 and P41EB015896 (Center for Functional
+Neuroimaging Technologies) as well as NSF awards 0958669 and 1042134. It has
+been supported by the NCRR *Center for Functional Neuroimaging Technologies*
+P41RR14075-06, the NIH grants 1R01EB009048-01, R01 EB006385-A101,
+1R01 HD40712-A1, 1R01 NS44319-01, and 2R01 NS37462-05, as well as by the
+Department of Energy under Award Number DE-FG02-99ER62764 to The MIND
+Institute.
+
+.. raw:: html
+
+   <div class="col-md-8">
+       <script type="text/javascript" src="http://www.ohloh.net/p/586838/widgets/project_basic_stats.js"></script>
+   </div>
+
+
+.. raw:: html
+
+   </div>
+   <div class="col-md-4">
+   <h2>Documentation</h2>
+
+.. toctree::
+   :maxdepth: 1
+
+   getting_started
+   whats_new
+   cite
+   references
+   tutorials
+   auto_examples/index
+   manual/index
+   python_reference
+   generated/commands
+   faq
+   advanced_setup
+   mne_cpp
+
+.. raw:: html
+
+   <h2>Community</h2>
+
+* | Analysis talk: join the
+  | `MNE mailing list <http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis>`_
+
+* | Feature requests and bug reports:
+  | `GitHub issues <https://github.com/mne-tools/mne-python/issues/>`_
+
+* | Chat with developers:
+  | `Gitter <https://gitter.im/mne-tools/mne-python>`_
+
+* :ref:`Contribute to MNE! <contributing>`
+
+.. raw:: html
+
+   <h2>Versions</h2>
+
+   <ul>
+      <li><a href=http://martinos.org/mne/stable>Stable</a></li>
+      <li><a href=http://martinos.org/mne/dev>Development</a></li>
+   </ul>
+
+   <div style="float: left; padding: 10px; width: 100%;">
+       <a class="twitter-timeline" href="https://twitter.com/mne_python" data-widget-id="317730454184804352">Tweets by @mne_python</a>
+   </div>
+
+   </div>
+   </div>
+   </div>
diff --git a/doc/source/known_projects.inc b/doc/known_projects.inc
similarity index 87%
rename from doc/source/known_projects.inc
rename to doc/known_projects.inc
index 2a39d82..3d040ac 100644
--- a/doc/source/known_projects.inc
+++ b/doc/known_projects.inc
@@ -6,7 +6,7 @@
 .. _`PROJECTNAME mailing list`: http://projects.scipy.org/mailman/listinfo/nipy-devel
 
 .. numpy
-.. _numpy: http://numpy.scipy.org
+.. _numpy: http://www.numpy.org
 .. _`numpy github`: http://github.com/numpy/numpy
 .. _`numpy style`: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
 .. _`numpy mailing list`: http://mail.scipy.org/mailman/listinfo/numpy-discussion
@@ -36,11 +36,16 @@
 .. _`nibabel github`: http://github.com/nipy/nibabel
 .. _`nibabel mailing list`: http://mail.scipy.org/mailman/listinfo/nipy-devel
 
+.. h5py
+.. _h5py: http://www.h5py.org
+.. _`h5py github`: http://github.com/h5py/h5py
+.. _`h5py mailing list`: https://groups.google.com/forum/#!forum/h5py
+
 .. marsbar
 .. _marsbar: http://marsbar.sourceforge.net
 .. _`marsbar github`: http://github.com/matthew-brett/marsbar
 .. _`MarsBaR mailing list`: https://lists.sourceforge.net/lists/listinfo/marsbar-users
 
 .. Astropy
-.. _Astropy: http://astropy.org
+.. _Astropy: http://www.astropy.org
 .. _`Astropy GitHub`: http://github.com/astropy/astropy
diff --git a/doc/source/links.inc b/doc/links.inc
similarity index 100%
rename from doc/source/links.inc
rename to doc/links.inc
diff --git a/doc/source/manual/AppA.rst b/doc/manual/appendix/bem_model.rst
similarity index 90%
rename from doc/source/manual/AppA.rst
rename to doc/manual/appendix/bem_model.rst
index 3434c49..a24a717 100644
--- a/doc/source/manual/AppA.rst
+++ b/doc/manual/appendix/bem_model.rst
@@ -1,11 +1,15 @@
 
-
 .. _create_bem_model:
 
 =======================
 Creating the BEM meshes
 =======================
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
 .. _BABBDHAG:
 
 Using the watershed algorithm
@@ -14,22 +18,7 @@ Using the watershed algorithm
 The watershed algorithm [Ségonne *et al.*,
 2004] is part of the FreeSurfer software.
 The name of the program is mri_watershed.
-Its use in the MNE environment is facilitated by the script mne_watershed_bem ,
-which assumes the following options:
-
-**\---subject  <*subject*>**
-
-    Defines the name of the subject. This can be also accomplished
-    by setting the SUBJECT environment variable.
-
-**\---overwrite**
-
-    Overwrite the results of previous run of mne_watershed_bem .
-
-**\---atlas**
-
-    Makes mri_watershed to employ
-    atlas information to correct the segmentation.
+Its use in the MNE environment is facilitated by the script `mne_watershed_bem`.
 
 After mne_watershed_bem has
 completed, the following files appear in the subject's ``bem/watershed`` directory:
@@ -88,7 +77,19 @@ following steps:
 
 - Inspecting the meshes with tkmedit, see :ref:`BABHJBED`.
 
-.. note:: The following sections assume that you have    run the appropriate setup scripts to make both MNE and FreeSurfer    software available.
+.. note:: Different methods can be employed for the creation of the
+          individual surfaces. For example, it may turn out that the
+          watershed algorithm produces a better quality skin surface than
+          the segmentation approach based on the FLASH images. If this is
+          the case, ``outer_skin.surf`` can be set to point to the
+          corresponding watershed output file while the other surfaces can
+          be picked from the FLASH segmentation data.
+
+.. note:: The :ref:`mne_convert_surface` C utility can be used to convert
+          text format triangulation files into the FreeSurfer surface format.
+
+.. note:: The following sections assume that you have run the appropriate
+          setup scripts to make both MNE and FreeSurfer software available.
 
 .. _BABEBJHI:
 
@@ -138,35 +139,11 @@ Creating the surface tessellations
 ==================================
 
 The BEM surface segmentation and tessellation is automated
-with the script mne_flash_bem .
+with the script :ref:`mne_flash_bem`.
 It assumes that a FreeSurfer reconstruction for this subject is
-already in place. The following options can be specified:
-
-**\---help**
-
-    Prints the usage information.
-
-**\---usage**
-
-    Prints the usage information.
-
-**\---noconvert**
-
-    Skip conversion of the original MRI data. The original data are
-    not needed and the preparatory steps 1.-3. listed below
-    are thus not needed.
-
-**\---noflash30**
-
-    The 30-degree flip angle data are not used.
-
-**\---unwarp  <*type*>**
-
-    Run grad_unwarp with ``--unwarp``  <*type*> option on each of the converted
-    data sets.
+already in place.
 
-Before running mne_flash_bem do
-the following:
+Before running mne_flash_bem do the following:
 
 - Run mne_organize_dicom as
   described above.
@@ -233,9 +210,7 @@ Inspecting the meshes
 =====================
 
 It is advisable to check the validity of the BEM meshes before
-using them. This can be done with help of tkmedit either
-before or after executing mne_setup_forward_model,
-see :ref:`CIHDBFEG`.
+using them. This can be done with the help of tkmedit, see :ref:`CIHDBFEG`.
 
 Using seglab
 ############
@@ -319,7 +294,7 @@ For further information, see http://brainsuite.usc.edu/.
 
 The conversion of BrainSuite tessellation
 files to MNE software compatible formats is accomplished with the mne_convert_surface utility,
-covered in :ref:`BEHDIAJG`.
+covered in :ref:`mne_convert_surface`.
 
 The workflow needed to employ the BrainSuite tessellations
 is:
diff --git a/doc/source/manual/AppEULA.rst b/doc/manual/appendix/c_EULA.rst
similarity index 96%
rename from doc/source/manual/AppEULA.rst
rename to doc/manual/appendix/c_EULA.rst
index c85d38b..b4eabd9 100644
--- a/doc/source/manual/AppEULA.rst
+++ b/doc/manual/appendix/c_EULA.rst
@@ -6,6 +6,11 @@
 Licence agreement
 =================
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
 This appendix includes the terms of the MNE software End-User
 License Agreement (EULA).
 
@@ -124,8 +129,9 @@ affiliated with such individual at the time of such download (the "Institution")
 - *General.* This Agreement constitutes
   the entire understanding between you and Licensor with respect to
   the subject matter hereof, and supercedes any prior or contemporaneous
-  oral or written agreements with respect thereto.  :ref:`CHDBAFGJ`,:ref:`ch_browse`,
-  :ref:`ch_mne`, :ref:`ch_morph`, :ref:`ch_convert`, and :ref:`ch_misc` shall survive any termination of this Agreement.  This
+  oral or written agreements with respect thereto.  :ref:`c_reference`,
+  :ref:`ch_browse`, :ref:`ch_mne`, :ref:`ch_morph`, and :ref:`ch_misc`
+  shall survive any termination of this Agreement.  This
   Agreement may be modified or amended only in a writing signed by
   duly authorized representatives of both Parties hereto.  The invalidity
   or unenforceability of any provision of this Agreement shall not
diff --git a/doc/manual/appendix/c_misc.rst b/doc/manual/appendix/c_misc.rst
new file mode 100644
index 0000000..261bb3f
--- /dev/null
+++ b/doc/manual/appendix/c_misc.rst
@@ -0,0 +1,99 @@
+Miscellaneous C functionality
+=============================
+
+.. _BABCCEHF:
+
+Setting up anatomical MR images for MRIlab
+------------------------------------------
+
+If you have the Neuromag software installed, the Neuromag
+MRI viewer, MRIlab, can be used to access the MRI slice data created
+by FreeSurfer. In addition, the
+Neuromag MRI directories can be used for storing the MEG/MRI coordinate
+transformations created with mne_analyze,
+see :ref:`CACEHGCD`. During the computation of the forward
+solution, mne_do_forward_solution searches
+for the MEG/MRI coordinate transformation in the Neuromag MRI directories,
+see :ref:`BABCHEJD`. The fif files created by mne_setup_mri can
+be loaded into Matlab with the fiff_read_mri function,
+see :ref:`ch_matlab`.
+
+These functions require running the script mne_setup_mri, which
+requires that the subject is set with the ``--subject`` option
+or by the SUBJECT environment variable. The script processes one
+or more MRI data sets from ``$SUBJECTS_DIR/$SUBJECT/mri``;
+by default these are T1 and brain. This default can be changed by
+specifying the sets with one or more ``--mri`` options.
+
+The script creates the directories ``mri/<name>-neuromag/slices`` and
+``mri/<name>-neuromag/sets``.
+If the input data set is in COR format, mne_setup_mri makes
+symbolic links from the COR files in the directory ``mri/<name>`` into
+``mri/<name>-neuromag/slices``, and creates a corresponding fif file
+COR.fif in ``mri/<name>-neuromag/sets``.
+This "description file" contains references to
+the actual MRI slices.
+
+If the input MRI data are stored in the newer mgz format,
+the file created in the ``mri/`` <*name*> ``-neuromag/sets`` directory
+will include the MRI pixel data as well. If available, the coordinate
+transformations to allow conversion between the MRI (surface RAS)
+coordinates and MNI and FreeSurfer Talairach coordinates are copied
+to the MRI description file. mne_setup_mri invokes mne_make_cor_set,
+described in :ref:`mne_make_cor_set`, to convert the data.
+
+For example:
+
+``mne_setup_mri --subject duck_donald --mri T1``
+
+This command processes the MRI data set T1 for subject duck_donald.
+
+.. note:: If the SUBJECT environment variable is set, it is usually
+          sufficient to run mne_setup_mri without any options.
+
+.. note:: If the name specified with the ``--mri`` option contains a slash,
+          the MRI data are accessed from the directory specified and the
+          ``SUBJECT`` and ``SUBJECTS_DIR`` environment variables as well as
+          the ``--subject`` option are ignored.
+
+MRIlab can also be used for coordinate frame alignment.
+Section 3.3.1 of the MRIlab User's Guide,
+Neuromag P/N NM20419A-A contains a detailed description of
+this task. Employ the images in the set ``mri/T1-neuromag/sets/COR.fif`` for
+the alignment. Check the alignment carefully using the digitization
+data included in the measurement file as described in Section 5.3.1
+of the above manual. Save the aligned description file in the same
+directory as the original description file without the alignment
+information but under a different name.
+
+
+.. _BABCDBDI:
+
+Cleaning the digital trigger channel
+------------------------------------
+
+The calibration factor of the digital trigger channel used
+to be set to a value much smaller than one by the Neuromag data
+acquisition software. Especially to facilitate viewing of raw data
+in graph, it is advisable to change the calibration factor to one.
+Furthermore, the eighth bit of the trigger word is coded incorrectly
+in the original raw files. Both problems can be corrected by saying:
+
+``mne_fix_stim14`` <*raw file*>
+
+More information about mne_fix_stim14 is
+available in :ref:`mne_fix_stim14`. It is recommended that this
+fix is included as the first raw data processing step. Note, however,
+that mne_browse_raw and mne_process_raw always set
+the calibration factor to one internally.
+
+.. note:: If your data file was acquired on or after November 10, 2005 on the Martinos center Vectorview system, it is not necessary to use mne_fix_stim14.
+
+.. _BABCDFJH:
+
+Fixing channel information
+--------------------------
+
+There are two potential discrepancies in the channel information
+which need to be fixed before proceeding:
+
+- EEG electrode locations may be incorrect
+  if more than 60 EEG channels are acquired.
+
+- The magnetometer coil identifiers are not always correct.
+
+These potential problems can be fixed with the utilities mne_check_eeg_locations and mne_fix_mag_coil_types,
+see :ref:`mne_check_eeg_locations` and :ref:`mne_fix_mag_coil_types`.
diff --git a/doc/source/manual/AppReleaseNotes.rst b/doc/manual/appendix/c_release_notes.rst
similarity index 79%
rename from doc/source/manual/AppReleaseNotes.rst
rename to doc/manual/appendix/c_release_notes.rst
index ef26b4e..5dac09f 100644
--- a/doc/source/manual/AppReleaseNotes.rst
+++ b/doc/manual/appendix/c_release_notes.rst
@@ -6,6 +6,11 @@
 Release notes
 =============
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
 This appendix contains a brief description of the changes
 in MNE software in each major release.
 
@@ -31,7 +36,7 @@ The following overall changes have been made:
   of Neuromag software was written.
 
 - The MEG sensor information is now imported from the coil definition file
-  instead of being hardcoded in the software. For details, see :ref:`BJECIGEB`.
+  instead of being hardcoded in the software.
 
 - CTF and 4D Neuroimaging sensors are now supported.
 
@@ -41,8 +46,7 @@ The following overall changes have been made:
   separated from the MNE software and now resides under a separate
   directory tree.
 
-- Support for topologically connected source spaces was added,
-  see :ref:`CIHCHDAE`.
+- Support for topologically connected source spaces was added.
 
 - A lot of bugs were fixed.
 
@@ -51,15 +55,13 @@ File conversion utilities
 
 The following import utilities were added:
 
-- mne_ctf2fiff to
-  convert CTF data to the fif format, see :ref:`BEHDEBCH`.
+- mne_ctf2fiff to convert CTF data to the fif format.
 
 - mne_tufts2fiff to convert
-  EEG data from Tufts university to fif format, see :ref:`BEHDGAIJ`.
+  EEG data from Tufts university to fif format.
 
 The output of the Matlab conversion utilities was changed
-to use structures. For details, see :ref:`BEHCCEBJ`, :ref:`BEHDIAJG`,
-and :ref:`convert_to_matlab`.
+to use structures.
 
 Matlab tools to import and export w and stc files were added.
 
@@ -81,7 +83,7 @@ The following changes have been made in mne_analyze :
 - An iterative coordinate alignment procedure was added, see
   :ref:`CACEHGCD`.
 
-- Utility to view continuous HPI information was added, see :ref:`CACIADAI`.
+- Utility to view continuous HPI information was added.
 
 - Several small changes and bug fixes were done.
 
@@ -97,7 +99,7 @@ Averaging
 The highly inefficient program mne_grand_average has
 been removed from the distribution and replaced with the combined
 use of mne_make_movie and a new
-averaging program mne_average_estimates , see :ref:`CHDFDIFE`.
+averaging program mne_average_estimates.
 
 Release notes for MNE software 2.5
 ##################################
@@ -133,8 +135,7 @@ There are several improvements in the raw data processor mne_browse_raw/mne_proc
   been updated to employ mne_browse_raw in
   viewing the averages computed from the sample raw data set.
 
-- It is now possible to create new SSP operators in mne_browse_raw ,
-  see :ref:`CACEAHEI`.
+- It is now possible to create new SSP operators in mne_browse_raw.
 
 - Listing of amplitude values have been added to both the strip-chart
   and topographical displays.
@@ -142,7 +143,7 @@ There are several improvements in the raw data processor mne_browse_raw/mne_proc
 - Text format event files can now be loaded for easy inspection
   of rejected epochs, for example.
 
-- Handling of derived channels has been added, see :ref:`CACFHAFH` and :ref:`CHDHJABJ`.
+- Handling of derived channels has been added.
 
 - SSS information is now transferred to the covariance matrix
   output files.
@@ -154,7 +155,7 @@ mne_epochs2mat
 
 This new utility extracts epochs from a raw data file, applies
 a bandpass filter to them and outputs them in a format convenient
-for processing in Matlab, see :ref:`BEHFIDCB`.
+for processing in Matlab.
 
 mne_analyze
 ===========
@@ -169,21 +170,20 @@ The following new features have been added:
   to mne_setup_source_space.
 
 - Rotation of the coordinate frame in the coordinate system
-  alignment dialog, see :ref:`CACEHGCD`.
+  alignment dialog.
 
 - Several new graphics output file formats as well as automatic
-  and snapshot output modes, see :ref:`CACIJFII`.
+  and snapshot output modes.
 
 - It is now possible to inquire timecourses from stc overlays.
   Both labels and surface picking are supported.
 
-- Added an option to include surface vertex numbers to the timecourse output,
-  see :ref:`CACJJGFA`.
+- Added an option to include surface vertex numbers to the timecourse output.
 
-- Overlays matching the scalp surface can now be loaded, see :ref:`CACFCHEC`.
+- Overlays matching the scalp surface can now be loaded.
 
 - The dipole display dialog now has control over the dipole
-  display properties. Multiple dipoles can be now displayed, see :ref:`CACGGAIA`.
+  display properties. Multiple dipoles can now be displayed.
 
 - Time stepping with cursor keys has been added.
 
@@ -202,15 +202,14 @@ frames is now included in the output file.
 mne_make_movie
 ==============
 
-Added the ``--labelverts`` option, see :ref:`CBBHHCEF`.
+Added the ``--labelverts`` option.
 
 mne_surf2bem
 ============
 
 Added the ``--shift`` option to move surface vertices
 outwards. Fixed some loopholes in topology checks. Also added the ``--innershift`` option
-to mne_setup_forward_model . For
-more information, see :ref:`CIHDBFEG` and :ref:`BEHCACCJ`.
+to mne_setup_forward_model.
 
 mne_forward_solution
 ====================
@@ -264,7 +263,7 @@ mne_show_fiff
 
 Replacement for the Neuromag utility show_fiff.
 This utility conforms to the standard command-line option conventions
-in MNE software. For details, see :ref:`CHDHEDEF`.
+in MNE software.
 
 mne_make_cor_set
 ----------------
@@ -277,14 +276,14 @@ mne_compensate_data
 -------------------
 
 This utility applies or removes CTF software gradient compensation
-from evoked-response data, see :ref:`BEHDDFBI`.
+from evoked-response data.
 
 mne_insert_4D_comp
 ------------------
 
 This utility merges 4D Magnes compensation data from a text
 file and the main helmet sensor data from a fif file and creates
-a new fif file :ref:`BEHGDDBH`.
+a new fif file, see :ref:`mne_insert_4D_comp`.
 
 mne_ctf_dig2fiff
 ----------------
@@ -297,21 +296,20 @@ mne_kit2fiff
 ------------
 
 The purpose of this new utility is to import data from the
-KIT MEG system, see :ref:`BEHBJGGF`.
+KIT MEG system.
 
 mne_make_derivations
 --------------------
 
 This new utility will take derivation data from a text file
-and convert it to fif format for use with mne_browse_raw ,
-see :ref:`CHDHJABJ`.
+and convert it to fif format for use with mne_browse_raw.
 
 BEM mesh generation
 ===================
 
 All information concerning BEM mesh generation has been moved
 to :ref:`create_bem_model`. Utilities for BEM mesh generation using
-FLASH images have been added, see :ref:`BABFCDJH`.
+FLASH images have been added.
 
 Matlab toolbox
 ==============
@@ -388,7 +386,7 @@ New features include:
 
 - Both in mne_browse_raw and
   in mne_analyze , a non-standard
-  default layout can be set on a user-by-user basis, see :ref:`CACFGGCF`.
+  default layout can be set on a user-by-user basis.
 
 - Added the ``--digtrigmask`` option.
 
@@ -396,10 +394,10 @@ New features include:
   or trackball.
 
 - Added remote control of the FreeSurfer MRI
-  viewer (tkmedit ), see :ref:`CACCHCBF`.
+  viewer (tkmedit).
 
 - Added fitting of single equivalent current dipoles and channel
-  selections, see :ref:`CHDGHIJJ`.
+  selections.
 
 - Added loading of FreeSurfer cortical
   parcellation data as labels.
@@ -413,12 +411,12 @@ New features include:
 - A shortcut button to direct a file selector to the appropriate
   default directory was added to several file loading dialogs.
 
-- The vertex coordinates can now be displayed, see :ref:`CHDIEHDH`.
+- The vertex coordinates can now be displayed.
 
 mne_average_forward_solutions
 -----------------------------
 
-EEG forward solutions are now averaged as well, see :ref:`CHDBBFCA`.
+EEG forward solutions are now averaged as well.
 
 mne_browse_raw and mne_process_raw
 ----------------------------------
@@ -433,7 +431,7 @@ Improvements in the raw data processor mne_browse_raw /mne_process_raw include:
   taking into account the initial skip in the event files. The new
   format is indicated by an additional "pseudoevent" in
   the beginning of the file. mne_browse_raw and mne_process_raw are
-  still compatible with the old event file format. For details, see :ref:`CACBCEGC`.
+  still compatible with the old event file format.
 
 - Using information from the fif data files, the wall clock
   time corresponding to the current file position is shown on the
@@ -454,20 +452,20 @@ Improvements in the raw data processor mne_browse_raw /mne_process_raw include:
   systems. These kinds of data should never be used as an input for source
   localization.
 
-- The ``--savehere`` option was added, see :ref:`CACFAAAJ`.
+- The ``--savehere`` option was added.
 
 - The stderr parameter was
-  added to the averaging definition files, see :ref:`CACHACHH`.
+  added to the averaging definition files.
 
 - Added compatibility with Elekta-Neuromag Report Composer (cliplab) and
   improved the quality of hardcopies.
 
 - Both in mne_browse_raw and
   in mne_analyze, a non-standard
-  default layout can be set on a user-by-user basis, see :ref:`CACFGGCF`.
+  default layout can be set on a user-by-user basis.
 
 - mne_browse_raw now includes
-  an interactive editor to create derived channels, see :ref:`CACJIEHI`.
+  an interactive editor to create derived channels.
 
 - The menus in mne_browse_raw were
   reorganized and a time point specification text field was added
@@ -495,22 +493,21 @@ Improvements in the raw data processor mne_browse_raw /mne_process_raw include:
   to the File menu.
 
 - Added new browsing functionality using the mouse wheel or
-  trackball, see :ref:`BABIDADB`.
+  trackball.
 
-- Added optional items to the topographical data displays, see :ref:`CACBEHCD`.
+- Added optional items to the topographical data displays.
 
-- Added an event list window, see :ref:`BABFDICC`.
+- Added an event list window.
 
-- Added an annotator window, see :ref:`BABCIGGH`.
+- Added an annotator window.
 
 - Keep events sorted by time.
 
 - User-defined events are automatically kept in a fif-format
-  annotation file, see :ref:`BABDFAHA`.
+  annotation file.
 
 - Added the delay parameter
-  to the averaging and covariance matrix estimation description files,
-  see :ref:`CACHACHH` and :ref:`BABECIAH`.
+  to the averaging and covariance matrix estimation description files.
 
 Detailed information on these changes can be found in :ref:`ch_browse`.
 
@@ -518,7 +515,7 @@ mne_compute_raw_inverse
 -----------------------
 
 The ``--digtrig``, ``--extra``, ``--noextra``, ``--split``, ``--labeldir``, and ``--out`` options
-were added, see :ref:`CBBCGHAH`.
+were added.
 
 mne_convert_surface
 -------------------
@@ -540,32 +537,32 @@ mne_epochs2mat
 --------------
 
 The name of the digital trigger channel can be specified
-with the MNE_TRIGGER_CH_NAME environment variable, see :ref:`BEHFIDCB`. Added
+with the MNE_TRIGGER_CH_NAME environment variable. Added
 the ``--digtrigmask`` option.
 
 mne_forward_solution
 --------------------
 
 Added code to compute the derivatives of with respect to
-the dipole position coordinates, see :ref:`CHDDIBAH`.
+the dipole position coordinates.
 
 mne_list_bem
 ------------
 
-The ``--surfno`` option is replaced with the ``--id`` option, see :ref:`BEHBBEHJ`.
+The ``--surfno`` option is replaced with the ``--id`` option.
 
 mne_make_cor_set
 ----------------
 
 Include data from mgh/mgz files in the output automatically.
 Include the Talairach transformations from the FreeSurfer data to
-the output file if possible. For details, see :ref:`BABBHHHE`.
+the output file if possible.
 
 mne_make_movie
 --------------
 
 Added the ``--noscalebar``, ``--nocomments``, ``--morphgrade``, ``--rate``,
-and ``--pickrange`` options, see :ref:`CBBECEDE`.
+and ``--pickrange`` options.
 
 mne_make_source_space
 ---------------------
@@ -573,8 +570,7 @@ mne_make_source_space
 The ``--spacing`` option is now implemented in this
 program, which means mne_mris_trix is
 now obsolete. The mne_setup_source_space script
-was modified accordingly. Support for tri, dec, and dip files was dropped,
-see :ref:`BEHCGJDD`.
+was modified accordingly. Support for tri, dec, and dip files was dropped.
 
 mne_mdip2stc
 ------------
@@ -590,19 +586,17 @@ The functionality is included in mne_process_raw .
 mne_rename_channels
 -------------------
 
-Added the ``--revert`` option, see :ref:`CHDCFEAJ`.
+Added the ``--revert`` option.
 
 mne_setup_forward_model
 -----------------------
 
-Added the ``--outershift`` and ``--scalpshift`` options,
-see :ref:`CIHDBFEG`.
+Added the ``--outershift`` and ``--scalpshift`` options.
 
 mne_simu
 --------
 
-Added source waveform expressions and the ``--raw`` option,
-see :ref:`CHDECAFD`.
+Added source waveform expressions and the ``--raw`` option.
 
 mne_transform_points
 --------------------
@@ -623,61 +617,59 @@ mne_collect_transforms
 ----------------------
 
 This utility collects coordinate transformation information
-from several sources into a single file, see :ref:`BABBIFIJ`.
+from several sources into a single file.
 
 mne_convert_dig_data
 --------------------
 
 This new utility converts digitization (Polhemus) data between
-different file formats, see :ref:`BABCJEAD`.
+different file formats.
 
 mne_edf2fiff
 ------------
 
 This is a new utility to convert EEG data from EDF, EDF+,
-and BDF formats to the fif format, see :ref:`BABHDBBD`.
+and BDF formats to the fif format.
 
 mne_brain_vision2fiff
 ---------------------
 
 This is a new utility to convert BrainVision EEG data to
-the fif format, see :ref:`BEHCCCDC`. This utility is also
+the fif format. This utility is also
 used by the mne_eximia2fiff script
 to convert EEG data from the Nexstim eXimia EEG system to the fif
-format, see :ref:`BEHGCEHH`.
+format.
 
 mne_anonymize
 -------------
 
 New utility to remove subject identifying information from
-measurement files, see :ref:`CHDIJHIC`.
+measurement files.
 
 mne_opengl_test
 ---------------
 
-New utility for testing the OpenGL graphics performance,
-see :ref:`CIHIIBDA`.
+New utility for testing the OpenGL graphics performance.
 
 mne_volume_data2mri
 -------------------
 
 Convert data defined in a volume created with mne_volume_source_space to
-an MRI overlay, see :ref:`BEHDEJEC`.
+an MRI overlay.
 
 mne_volume_source_space
 -----------------------
 
-Create a a grid of source points within a volume, see :ref:`BJEFEHJI`. mne_volume_source_space also
+Create a grid of source points within a volume. mne_volume_source_space also
 optionally creates a trilinear interpolator matrix to facilitate
 converting a distribution of values in the volume grid into an MRI
-overlay using mne_volume_data2mri ,
-see :ref:`BEHDEJEC`.
+overlay using mne_volume_data2mri.
 
 mne_copy_processing_history
 ---------------------------
 
 This new utility copies the processing history block from
-one data file to another, see :ref:`CJACECAH`.
+one data file to another.
 
 Release notes for MNE software 2.7
 ##################################
@@ -711,7 +703,7 @@ MNE software and setup for individual users:
 - The setup scripts have changed.
 
 The installation and user-level effects of the new software
-organization are discussed in :ref:`CHDBAFGJ` and :ref:`setup_martinos`.
+organization are discussed in :ref:`getting_started`.
 
 In addition, several minor bugs have been fixed in the source
 code. Most relevant changes visible to the user are listed below.
@@ -734,22 +726,22 @@ mne_browse_raw
 ==============
 
 - Rejection criteria to detect flat channels
-  have been added, see :ref:`BABIHFBI` and :ref:`BABCGEJE`.
+  have been added.
 
 - Possibility to detect temporal skew between trigger input
-  lines has been added, see :ref:`BABIHFBI` and :ref:`BABCGEJE`.
+  lines has been added.
 
 - ``--allowmaxshield`` option now works in the batch mode as well.
 
 - Added the ``--projevent`` option to batch mode.
 
-- It is now possible to compute an SSP operator for EEG, see :ref:`CACEAHEI`.
+- It is now possible to compute an SSP operator for EEG.
 
 mne_analyze
 ===========
 
 - Both hemispheres can now be displayed
-  simultaneously, see :ref:`CACCABEA`.
+  simultaneously.
 
 - If the source space was created with mne_make_source_space version 2.3
   or later, the subject's surface data are automatically
@@ -767,32 +759,24 @@ Miscellaneous
   and bin/admin directories.
 
 - mne_anonymize now has the
-  ``--his`` option to remove the HIS ID of the subject, see :ref:`CHDIJHIC`.
+  ``--his`` option to remove the HIS ID of the subject.
 
 - mne_check_surface now has
   the ``--bem`` and ``--id`` options to check surfaces from a BEM fif file.
-  For details, try mne_check_surface --help.
 
-- mne_compute_raw_inverse now
-  has the ``--orignames`` option, see :ref:`CHDEIHFA`.
+- mne_compute_raw_inverse now has the ``--orignames`` option.
 
-- Added ``--headcoord`` option to mne_convert_dig_data ,
-  see :ref:BABCJEAD`.
+- Added ``--headcoord`` option to mne_convert_dig_data.
 
-- Added ``--talairach`` option to mne_make_cor_set ,
-  see :ref:`BABBHHHE`.
+- Added ``--talairach`` option to mne_make_cor_set.
 
-- Added the ``--morph`` option to mne_setup_source_space and mne_make_source_space ,
-  see :ref:`CIHCHDAE` and :ref:`BEHCGJDD`, respectively.
+- Added the ``--morph`` option to mne_setup_source_space and mne_make_source_space.
 
-- Added the ``--prefix`` option to mne_morph_labels ,
-  see :ref:`CHDCEAFC`.
+- Added the ``--prefix`` option to mne_morph_labels.
 
-- Added the ``--blocks`` and ``--indent`` options to mne_show_fiff ,
-  see :ref:`CHDHEDEF`.
+- Added the ``--blocks`` and ``--indent`` options to mne_show_fiff.
 
-- Added the ``--proj`` option as well as map types 5 and 6 to mne_sensitivity_map ,
-  see :ref:`CHDDCBGI`.
+- Added the ``--proj`` option as well as map types 5 and 6 to mne_sensitivity_map.
 
 - Fixed a bug in mne_inverse_operator which
   caused erroneous calculation of EEG-only source estimates if the
@@ -806,7 +790,7 @@ mne_analyze
 ===========
 
 - Added a new restricted mode for visualizing
-  head position within the helmet, see :ref:`CHDJECCG` and Section 7.21.**doesn't exist**
+  head position within the helmet.
 
 - Added information about mne_make_scalp_surfaces to :ref:`CHDCGHIF`.
 
@@ -815,9 +799,9 @@ mne_browse_raw
 
 - Added possibility for multiple event
   parameters and the mask parameter in averaging and noise covariance
-  calculation, see :ref:`CACHACHH`.
+  calculation.
 
-- Added simple conditional averaging, see :ref:`CACHACHH`.
+- Added simple conditional averaging.
 
 Release notes for MNE software 2.7.2
 ####################################
@@ -826,7 +810,7 @@ mne_add_patch_info
 ==================
 
 Added the capability to compute distances between source
-space vertices, see :ref:`BEHCBCGG`.
+space vertices.
 
 Matlab toolbox
 ==============
@@ -835,28 +819,23 @@ Matlab toolbox
   file I/O to employ 1-based vertex numbering inside Matlab, see Table 10.11.
 
 - mne_read_source_spaces.m now reads the inter-vertex distance
-  information now optionally produced by mne_add_patch_info ,
-  see :ref:`BEHCBCGG`.
+  information optionally produced by mne_add_patch_info.
 
 Miscellaneous
 =============
 
-- Added ``--shift`` option to mne_convert_surface ,
-  see :ref:`BABEABAA`.
+- Added ``--shift`` option to mne_convert_surface.
 
-- Added ``--alpha`` option to mne_make_movie ,
-  see :ref:`CBBBBHIF`.
+- Added ``--alpha`` option to mne_make_movie.
 
-- Added ``--noiserank`` option to mne_inverse_operator and mne_do_inverse_operator ,
-  see :ref:`CBBDDBGF` and :ref:`CIHCFJEI`, respectively.
+- Added ``--noiserank`` option to mne_inverse_operator and mne_do_inverse_operator.
 
 - The fif output from mne_convert_dig_data now
   includes the transformation between the digitizer and MNE head coordinate
-  systems if such a transformation has been requested, see :ref:`BABCJEAD`.
-  This also affects the output from mne_eximia2fiff, see :ref:`BEHGCEHH`.
+  systems if such a transformation has been requested.
+  This also affects the output from mne_eximia2fiff.
 
-- Added ``--noflash30``, ``--noconvert``, and ``--unwarp`` options to mne_flash_bem ,
-  see :ref:`BABFCDJH`.
+- Added ``--noflash30``, ``--noconvert``, and ``--unwarp`` options to mne_flash_bem.
 
 Release notes for MNE software 2.7.3
 ####################################
@@ -868,11 +847,9 @@ Miscellaneous
   information in the FreeSurfer surface files.
 
 - The ``--mghmri`` option in combination with ``--surfout`` inserts
-  the volume geometry information to the output of mne_convert_surface ,
-  see :ref:`BEHDIAJG`.
+  the volume geometry information to the output of mne_convert_surface.
 
-- Added ``--replacegeom`` option to mne_convert_surface ,
-  see :ref:`BEHDIAJG`.
+- Added ``--replacegeom`` option to mne_convert_surface.
 
 - Modified mne_watershed_bem and mne_flash_bem to
   include the volume geometry information to the output. This allows
diff --git a/doc/manual/appendix/martinos.rst b/doc/manual/appendix/martinos.rst
new file mode 100644
index 0000000..df399fc
--- /dev/null
+++ b/doc/manual/appendix/martinos.rst
@@ -0,0 +1,117 @@
+
+
+.. _setup_martinos:
+
+============================
+Setup at the Martinos Center
+============================
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
+This Appendix contains information specific to the Martinos
+Center setup.
+
+.. _user_environment_martinos:
+
+User environment
+################
+
+In the Martinos Center computer network, the 2.7 version
+of MNE is located at /usr/pubsw/packages/mne/stable. To use this
+version, follow :ref:`user_environment` substituting /usr/pubsw/packages/mne/stable
+for <*MNE*> and /usr/pubsw/packages/matlab/current
+for <*Matlab*>. For most users,
+the default shell is tcsh.
+
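+For example, in tcsh this might amount to the following (a sketch that
+assumes the standard MNE environment setup script; see
+:ref:`user_environment` for the details):
+
+``setenv MNE_ROOT /usr/pubsw/packages/mne/stable``
+
+``source $MNE_ROOT/bin/mne_setup``
+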
+.. note:: A new version of MNE is built every night from the latest sources. This version is located at /usr/pubsw/packages/mne/nightly.
+
+.. _BABGFDJG:
+
+Using Neuromag software
+#######################
+
+Software overview
+=================
+
+The complete set of Neuromag software is available on the
+LINUX workstations. The programs can be accessed from the command
+line, see :ref:`BABFIEHC`. The corresponding manuals, located
+at ``$NEUROMAG_ROOT/manuals`` are listed in :ref:`BABCJJGF`.
+
+.. _BABFIEHC:
+
+.. table:: Principal Neuromag software modules.
+
+    ===========  =================================
+    Module       Description
+    ===========  =================================
+    xfit         Source modelling
+    xplotter     Data plotting
+    graph        General purpose data processor
+    mrilab       MEG-MRI integration
+    seglab       MRI segmentation
+    cliplab      Graphics clipboard
+    ===========  =================================
+
+.. _BABCJJGF:
+
+.. table:: List of Neuromag software manuals.
+
+    ===========  =========================================
+    Module       pdf
+    ===========  =========================================
+    xfit         XFit.pdf
+    xplotter     Xplotter.pdf
+    graph        GraphUsersGuide.pdf GraphReference.pdf
+    mrilab       Mrilab.pdf
+    seglab       Seglab.pdf
+    cliplab      Cliplab.pdf
+    ===========  =========================================
+
+To access the Neuromag software on the LINUX workstations
+in the Martinos Center, type (in tcsh or csh)
+
+``source /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_csh``
+
+or in POSIX shell
+
+``. /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_sh``
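+
+The setup can then be verified, for example, by checking that the
+Neuromag modules are on the search path:
+
+``which xfit``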
+
+Using MRIlab for coordinate system alignment
+============================================
+
+The MEG-MRI coordinate system alignment can also be accomplished with
+the Neuromag tool MRIlab, part of the standard software on Neuromag
+MEG systems.
+
+In MRIlab, the following steps are necessary for the coordinate
+system alignment:
+
+- Load the MRI description file ``COR.fif`` from ``subjects/sample/mri/T1-neuromag/sets`` through File/Open.
+
+- Open the landmark setting dialog from Windows/Landmarks.
+
+- Click on one of the coordinate setting fields on the Nasion line.
+  Click Goto. Select the crosshair
+  tool and move the crosshair to the nasion. Click Get.
+
+- Proceed similarly for the left and right auricular points.
+  Your instructor will help you with the selection of the correct
+  points.
+
+- Click OK to set the alignment.
+
+- Load the digitization data from the file ``sample_audvis_raw.fif`` or ``sample_audvis-ave.fif`` (the
+  on-line evoked-response average file) in ``MEG/sample`` through File/Import/Isotrak data. Click Make points to
+  show all the digitization data on the MRI slices.
+
+- Check that the alignment is correct by verifying that the locations
+  of the digitized points are reasonable. Adjust the landmark locations
+  using the Landmarks dialog, if
+  necessary.
+
+- Save the aligned file to the file suggested in the dialog
+  coming up from File/Save.
diff --git a/doc/manual/c_reference.rst b/doc/manual/c_reference.rst
new file mode 100644
index 0000000..f9c8645
--- /dev/null
+++ b/doc/manual/c_reference.rst
@@ -0,0 +1,6442 @@
+
+
+.. _c_reference:
+
+===============
+C API Reference
+===============
+
+Note that most programs have the options ``--version`` and ``--help`` which
+give the version information and usage information, respectively.
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
+List of components
+##################
+
+Software components
+===================
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
+.. table::
+
+
+    +----------------------------+--------------------------------------------+
+    | Name                       |   Purpose                                  |
+    +============================+============================================+
+    | `mne_analyze`_             | An interactive analysis tool for computing |
+    |                            | source estimates, see                      |
+    |                            | :ref:`ch_interactive_analysis`.            |
+    +----------------------------+--------------------------------------------+
+    | `mne_average_estimates`_   | Average data across subjects.              |
+    +----------------------------+--------------------------------------------+
+    | `mne_browse_raw`_          | Interactive raw data browser. Includes     |
+    |                            | filtering, offline averaging, and          |
+    |                            | computation of covariance matrices,        |
+    |                            | see :ref:`ch_browse`.                      |
+    +----------------------------+--------------------------------------------+
+    | `mne_compute_mne`_         | Computes the minimum-norm estimates.       |
+    |                            | Most functionality is included in          |
+    |                            | :ref:`mne_make_movie`.                     |
+    +----------------------------+--------------------------------------------+
+    | `mne_compute_raw_inverse`_ | Compute the inverse solution from raw data,|
+    |                            | see :ref:`CBBCGHAH`.                       |
+    +----------------------------+--------------------------------------------+
+    | `mne_convert_mne_data`_    | Convert MNE data files to other file       |
+    |                            | formats.                                   |
+    +----------------------------+--------------------------------------------+
+    | `mne_do_forward_solution`_ | Convenience script to calculate the forward|
+    |                            | solution matrix, see :ref:`BABCHEJD`.      |
+    +----------------------------+--------------------------------------------+
+    | `mne_do_inverse_operator`_ | Convenience script for inverse operator    |
+    |                            | decomposition, see :ref:`CIHCFJEI`.        |
+    +----------------------------+--------------------------------------------+
+    | `mne_forward_solution`_    | Calculate the forward solution matrix, see |
+    |                            | :ref:`CHDDIBAH`.                           |
+    +----------------------------+--------------------------------------------+
+    | `mne_inverse_operator`_    | Compute the inverse operator decomposition,|
+    |                            | see :ref:`CBBDDBGF`.                       |
+    +----------------------------+--------------------------------------------+
+    | `mne_make_movie`_          | Make movies in batch mode, see             |
+    |                            | :ref:`CBBECEDE`.                           |
+    +----------------------------+--------------------------------------------+
+    | `mne_make_source_space`_   | Create a *fif* source space description    |
+    |                            | file, see :ref:`BEHCGJDD`.                 |
+    +----------------------------+--------------------------------------------+
+    | `mne_process_raw`_         | A batch-mode version of mne_browse_raw,    |
+    |                            | see :ref:`ch_browse`.                      |
+    +----------------------------+--------------------------------------------+
+    | `mne_redo_file`_           | Many intermediate result files contain a   |
+    |                            | description of their                       |
+    |                            | 'production environment'. Such files can   |
+    |                            | be recreated easily with this utility.     |
+    |                            | This is convenient if, for example,        |
+    |                            | the selection of bad channels is changed   |
+    |                            | and the inverse operator decomposition has |
+    |                            | to be recalculated.                        |
+    +----------------------------+--------------------------------------------+
+    | `mne_redo_file_nocwd`_     | Works like mne_redo_file but does not try  |
+    |                            | to change into the working directory       |
+    |                            | specified in the 'production environment'. |
+    +----------------------------+--------------------------------------------+
+    | `mne_setup_forward_model`_ | Set up the BEM-related fif files,          |
+    |                            | see :ref:`CIHDBFEG`.                       |
+    +----------------------------+--------------------------------------------+
+    | `mne_setup_mri`_           | A convenience script to create the fif     |
+    |                            | files describing the anatomical MRI data,  |
+    |                            | see :ref:`BABCCEHF`.                       |
+    +----------------------------+--------------------------------------------+
+    | `mne_setup_source_space`_  | A convenience script to create a source    |
+    |                            | space description file, see                |
+    |                            | :ref:`CIHCHDAE`.                           |
+    +----------------------------+--------------------------------------------+
+    | `mne_show_environment`_    | Show information about the production      |
+    |                            | environment of a file.                     |
+    +----------------------------+--------------------------------------------+
+
+
+.. _ch_misc:
+
+Utilities
+=========
+
+.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
+.. _BABDJHGH:
+.. table::
+
+    +----------------------------------+--------------------------------------------+
+    | Name                             |   Purpose                                  |
+    +==================================+============================================+
+    | `mne_add_patch_info`_            | Add neighborhood information to a source   |
+    |                                  | space file.                                |
+    +----------------------------------+--------------------------------------------+
+    | `mne_add_to_meas_info`_          | Utility to add new information to the      |
+    |                                  | measurement info block of a fif file. The  |
+    |                                  | source of information is another fif file. |
+    +----------------------------------+--------------------------------------------+
+    | `mne_add_triggers`_              | Modify the trigger channel STI 014 in a raw|
+    |                                  | data file. The same effect can be reached  |
+    |                                  | by using an event file for averaging in    |
+    |                                  | :ref:`mne_process_raw` and                 |
+    |                                  | :ref:`mne_browse_raw`.                     |
+    +----------------------------------+--------------------------------------------+
+    | `mne_annot2labels`_              | Convert parcellation data into label files.|
+    +----------------------------------+--------------------------------------------+
+    | `mne_anonymize`_                 | Remove subject-specific information from a |
+    |                                  | fif data file.                             |
+    +----------------------------------+--------------------------------------------+
+    | `mne_average_forward_solutions`_ | Calculate an average of forward solutions, |
+    |                                  | see :ref:`CHDBBFCA`.                       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_brain_vision2fiff`_         | Convert EEG data from BrainVision format   |
+    |                                  | to fif format.                             |
+    +----------------------------------+--------------------------------------------+
+    | `mne_change_baselines`_          | Change the dc offsets according to         |
+    |                                  | specifications given in a text file.       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_change_nave`_               | Change the number of averages in an        |
+    |                                  | evoked-response data file. This is often   |
+    |                                  | necessary if the file was derived from     |
+    |                                  | several files.                             |
+    +----------------------------------+--------------------------------------------+
+    | `mne_check_eeg_locations`_       | Check that the EEG electrode locations     |
+    |                                  | have been correctly transferred from the   |
+    |                                  | Polhemus data block to the channel         |
+    |                                  | information tags.                          |
+    +----------------------------------+--------------------------------------------+
+    | `mne_check_surface`_             | Check the validity of a FreeSurfer surface |
+    |                                  | file or one of the surfaces within a BEM   |
+    |                                  | file. This program simply checks for       |
+    |                                  | topological errors in surface files.       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_collect_transforms`_        | Collect coordinate transformations from    |
+    |                                  | several sources into a single fif file.    |
+    +----------------------------------+--------------------------------------------+
+    | `mne_compensate_data`_           | Change the applied software gradient       |
+    |                                  | compensation in an evoked-response data    |
+    |                                  | file, see :ref:`BEHDDFBI`.                 |
+    +----------------------------------+--------------------------------------------+
+    | `mne_copy_processing_history`_   | Copy the processing history between files. |
+    +----------------------------------+--------------------------------------------+
+    | `mne_convert_dig_data`_          | Convert digitization data between          |
+    |                                  | different formats.                         |
+    +----------------------------------+--------------------------------------------+
+    | `mne_convert_lspcov`_            | Convert the LISP format noise covariance   |
+    |                                  | matrix output by graph into fif.           |
+    +----------------------------------+--------------------------------------------+
+    | `mne_convert_ncov`_              | Convert the ncov format noise covariance   |
+    |                                  | file to fif.                               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_convert_surface`_           | Convert FreeSurfer and text format surface |
+    |                                  | files into Matlab mat files.               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_cov2proj`_                  | Pick eigenvectors from a covariance matrix |
+    |                                  | and create a signal-space projection (SSP) |
+    |                                  | file out of them.                          |
+    +----------------------------------+--------------------------------------------+
+    | `mne_create_comp_data`_          | Create a fif file containing software      |
+    |                                  | gradient compensation information from a   |
+    |                                  | text file.                                 |
+    +----------------------------------+--------------------------------------------+
+    | `mne_ctf2fiff`_                  | Convert a CTF ds folder into a fif file.   |
+    +----------------------------------+--------------------------------------------+
+    | `mne_ctf_dig2fiff`_              | Convert text format digitization data to   |
+    |                                  | fif format.                                |
+    +----------------------------------+--------------------------------------------+
+    | `mne_dicom_essentials`_          | List essential information from a          |
+    |                                  | DICOM file.                                |
+    |                                  | This utility is used by the script         |
+    |                                  | mne_organize_dicom, see :ref:`BABEBJHI`.   |
+    +----------------------------------+--------------------------------------------+
+    | `mne_edf2fiff`_                  | Convert EEG data from the EDF/EDF+/BDF     |
+    |                                  | formats to the fif format.                 |
+    +----------------------------------+--------------------------------------------+
+    | `mne_epochs2mat`_                | Apply bandpass filter to raw data and      |
+    |                                  | extract epochs for subsequent processing   |
+    |                                  | in Matlab.                                 |
+    +----------------------------------+--------------------------------------------+
+    | `mne_evoked_data_summary`_       | List summary of averaged data from a fif   |
+    |                                  | file to the standard output.               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_eximia2fiff`_               | Convert EEG data from the Nexstim eXimia   |
+    |                                  | system to fif format.                      |
+    +----------------------------------+--------------------------------------------+
+    | `mne_fit_sphere_to_surf`_        | Fit a sphere to a surface given in fif     |
+    |                                  | or FreeSurfer format.                      |
+    +----------------------------------+--------------------------------------------+
+    | `mne_fix_mag_coil_types`_        | Update the coil types for magnetometers    |
+    |                                  | in a fif file.                             |
+    +----------------------------------+--------------------------------------------+
+    | `mne_fix_stim14`_                | Fix coding errors of trigger channel       |
+    |                                  | STI 014, see :ref:`BABCDBDI`.              |
+    +----------------------------------+--------------------------------------------+
+    | `mne_flash_bem`_                 | Create BEM tessellation using multi-echo   |
+    |                                  | FLASH MRI data, see :ref:`BABFCDJH`.       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_insert_4D_comp`_            | Read Magnes compensation channel data from |
+    |                                  | a text file and merge it with raw data     |
+    |                                  | from other channels in a fif file.         |
+    +----------------------------------+--------------------------------------------+
+    | `mne_kit2fiff`_                  | Convert KIT data to FIF.                   |
+    +----------------------------------+--------------------------------------------+
+    | `mne_list_bem`_                  | List BEM information in text format.       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_list_coil_def`_             | Create the coil description file. This     |
+    |                                  | is run automatically when the software     |
+    |                                  | is set up, see :ref:`BJEHHJIJ`.            |
+    +----------------------------------+--------------------------------------------+
+    | `mne_list_proj`_                 | List signal-space projection data from a   |
+    |                                  | fif file.                                  |
+    +----------------------------------+--------------------------------------------+
+    | `mne_list_source_space`_         | List source space information in text      |
+    |                                  | format suitable for importing into         |
+    |                                  | Neuromag MRIlab.                           |
+    +----------------------------------+--------------------------------------------+
+    | `mne_list_versions`_             | List versions and compilation dates of MNE |
+    |                                  | software modules.                          |
+    +----------------------------------+--------------------------------------------+
+    | `mne_make_cor_set`_              | Used by mne_setup_mri to create fif format |
+    |                                  | MRI description files from COR or mgh/mgz  |
+    |                                  | format MRI data, see :ref:`BABCCEHF`.      |
+    +----------------------------------+--------------------------------------------+
+    | `mne_make_derivations`_          | Create a channel derivation data file.     |
+    +----------------------------------+--------------------------------------------+
+    | `mne_make_eeg_layout`_           | Make a topographical trace layout file     |
+    |                                  | using the EEG electrode locations from     |
+    |                                  | an actual measurement.                     |
+    +----------------------------------+--------------------------------------------+
+    | `mne_make_morph_maps`_           | Precompute the mapping data needed for     |
+    |                                  | morphing between subjects, see             |
+    |                                  | :ref:`CHDBBHDH`.                           |
+    +----------------------------------+--------------------------------------------+
+    | `mne_make_uniform_stc`_          | Create a spatially uniform stc file for    |
+    |                                  | testing purposes.                          |
+    +----------------------------------+--------------------------------------------+
+    | `mne_mark_bad_channels`_         | Update the list of unusable channels in    |
+    |                                  | a data file.                               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_morph_labels`_              | Morph label file definitions between       |
+    |                                  | subjects.                                  |
+    +----------------------------------+--------------------------------------------+
+    | `mne_organize_dicom`_            | Organize DICOM MRI image files into        |
+    |                                  | directories, see :ref:`BABEBJHI`.          |
+    +----------------------------------+--------------------------------------------+
+    | `mne_prepare_bem_model`_         | Perform the geometry calculations for      |
+    |                                  | BEM forward solutions, see :ref:`CHDJFHEB`.|
+    +----------------------------------+--------------------------------------------+
+    | `mne_process_stc`_               | Manipulate stc files.                      |
+    +----------------------------------+--------------------------------------------+
+    | `mne_raw2mat`_                   | Convert raw data into a Matlab file.       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_rename_channels`_           | Change the names and types of channels     |
+    |                                  | in a fif file.                             |
+    +----------------------------------+--------------------------------------------+
+    | `mne_sensitivity_map`_           | Compute a sensitivity map and output       |
+    |                                  | the result in a w-file.                    |
+    +----------------------------------+--------------------------------------------+
+    | `mne_sensor_locations`_          | Create a file containing the sensor        |
+    |                                  | locations in text format.                  |
+    +----------------------------------+--------------------------------------------+
+    | `mne_show_fiff`_                 | List contents of a fif file.               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_simu`_                      | Simulate MEG and EEG data.                 |
+    +----------------------------------+--------------------------------------------+
+    | `mne_smooth`_                    | Smooth a w or stc file.                    |
+    +----------------------------------+--------------------------------------------+
+    | `mne_surf2bem`_                  | Create a *fif* file describing the         |
+    |                                  | triangulated compartment boundaries for    |
+    |                                  | the boundary-element model (BEM),          |
+    |                                  | see :ref:`BEHCACCJ`.                       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_toggle_skips`_              | Change data skip tags in a raw file into   |
+    |                                  | ignored skips or vice versa.               |
+    +----------------------------------+--------------------------------------------+
+    | `mne_transform_points`_          | Transform between MRI and MEG head         |
+    |                                  | coordinate frames.                         |
+    +----------------------------------+--------------------------------------------+
+    | `mne_tufts2fiff`_                | Convert EEG data from the Tufts            |
+    |                                  | University format to fif format.           |
+    +----------------------------------+--------------------------------------------+
+    | `mne_view_manual`_               | Start a PDF reader to show this manual     |
+    |                                  | from its standard location.                |
+    +----------------------------------+--------------------------------------------+
+    | `mne_volume_data2mri`_           | Convert volumetric data defined in a       |
+    |                                  | source space created with                  |
+    |                                  | mne_volume_source_space into an MRI        |
+    |                                  | overlay.                                   |
+    +----------------------------------+--------------------------------------------+
+    | `mne_volume_source_space`_       | Make a volumetric source space,            |
+    |                                  | see :ref:`BJEFEHJI`.                       |
+    +----------------------------------+--------------------------------------------+
+    | `mne_watershed_bem`_             | Do the segmentation for BEM using the      |
+    |                                  | watershed algorithm, see :ref:`BABBDHAG`.  |
+    +----------------------------------+--------------------------------------------+
+
+
+Software component command-line arguments
+#########################################
+
+.. _mne_analyze:
+
+mne_analyze
+===========
+
+Since mne_analyze is primarily an interactive analysis tool, there are only a
+few command-line options:
+
+``--cd <*dir*>``
+
+    Change to this directory before starting.
+
+``--subject <*name*>``
+
+    Specify the default subject name for surface loading.
+
+``--digtrig <*name*>``
+
+    Name of the digital trigger channel. The default value is 'STI
+    014'. Underscores in the channel name will be replaced
+    by spaces.
+
+``--digtrigmask <*number*>``
+
+    Mask to be applied to the raw data trigger channel values before considering
+    them. This option is useful if one wants to set some bits in a don't
+    care state. For example, some finger response pads keep the trigger
+    lines high if not in use, *i.e.*, a finger is
+    not in place. Yet, it is convenient to keep these devices permanently
+    connected to the acquisition system. The number can be given in
+    decimal or hexadecimal format (beginning with 0x or 0X). For example,
+    the value 255 (0xFF) means that only the lowest order byte (usually
+    trigger lines 1 - 8 or bits 0 - 7) will be considered.
+
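+For example, with mask 0xFF a raw trigger channel value of 0x1210
+(4624, a hypothetical illustration) would be read as 0x10, *i.e.*, as
+trigger 16.
+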
+``--visualizehpi``
+
+    Start mne_analyze in the restricted *head
+    position visualization* mode. For details, see :ref:`CHDEDFAE`.
+
+``--dig <*filename*>``
+
+    Specify a file containing the head shape digitization data. This option
+    is only usable if the *head position visualization* mode has
+    first been invoked with the ``--visualizehpi`` option.
+
+``--hpi <*filename*>``
+
+    Specify a file containing the transformation between the MEG device
+    and head coordinate frames. This option is only usable if the *head
+    position visualization* mode has
+    first been invoked with the ``--visualizehpi`` option.
+
+``--scalehead``
+
+    In *head position visualization* mode, scale
+    the average scalp surface according to the head surface digitization
+    data before aligning them to the scalp surface. This option is
+    recommended.
+
+``--rthelmet``
+
+    Use the room-temperature helmet surface instead of the MEG sensor
+    surface when showing the relative position of the MEG sensors and
+    the head in the *head position visualization* mode.
+
+.. note:: Before starting mne_analyze the ``SUBJECTS_DIR`` environment variable has to be set.
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_analyze. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
+
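+As an illustration, the head position visualization options and the
+trigger channel options described above might be combined as follows
+(the file and channel names are hypothetical):
+
+``mne_analyze --visualizehpi --dig dig.fif --hpi trans.fif --scalehead``
+
+``mne_analyze --digtrig STI_101 --digtrigmask 0xFF``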
+
+.. _mne_average_estimates:
+
+mne_average_estimates
+=====================
+
+This is a utility for averaging data in stc files. It requires that
+all stc files represent data on one individual's cortical
+surface and contain identical sets of vertices. mne_average_estimates uses
+linear interpolation to resample data in time as necessary. The
+command line arguments are:
+
+``--desc <*filename*>``
+
+    Specifies the description file for averaging. The format of this
+    file is described below.
+
+The description file
+--------------------
+
+The description file for mne_average_estimates consists
+of a sequence of tokens, separated by whitespace (space, tab, or
+newline). If a token consists of several words it has to be enclosed
+in quotes. One or more tokens constitute a phrase, which has a
+meaning for the averaging definition. Any line starting with the
+pound sign (#) is considered to be a comment line. There are two
+kinds of phrases in the description file: global and contextual.
+The global phrases have the same meaning independent of their location
+in the file while the contextual phrases have different effects depending
+on their location in the file.
+
+There are three types of contexts in the description file:
+the global context, an input context,
+and the output context. In the
+beginning of the file the context is global for
+defining global parameters. The input context
+defines one of the input files (subjects) while the output context
+specifies the destination for the average.
+
+The global phrases are:
+
+``tmin <*value/ms*>``
+
+    The minimum time to be considered. The output stc file starts at
+    this time point if the time ranges of the stc files include this
+    time. Otherwise the output starts from the next later available
+    time point.
+
+``tstep <*step/ms*>``
+
+    Time step between consecutive movie frames, specified in milliseconds.
+
+``tmax <*value/ms*>``
+
+    The maximum time point to be considered. A multiple of tstep will be
+    added to the first time point selected until this value or the last time
+    point in one of the input stc files is reached.
+
+``integ <*Δt/ms*>``
+
+    Integration time for each frame. Defaults to zero. The integration will
+    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
+    the integration range will be :math:`t_0 - ^{\Delta t}/_2 \leq t \leq t_0 + ^{\Delta t}/_2`.
+
+``stc <*filename*>``
+
+    Specifies an input stc file. The filename can be specified with
+    one of the ``-lh.stc`` and ``-rh.stc`` endings
+    or without them. This phrase ends the present context and starts
+    an input context.
+
+``deststc <*filename*>``
+
+    Specifies the output stc file. The filename can be specified with
+    one of the ``-lh.stc`` and ``-rh.stc`` endings
+    or without them. This phrase ends the present context and starts
+    the output context.
+
+``lh``
+
+    Process the left hemisphere. By default, both hemispheres are processed.
+
+``rh``
+
+    Process the right hemisphere. By default, both hemispheres are processed.
+
+The contextual phrases are:
+
+``weight <*value*>``
+
+    Specifies the weight of the current data set. This phrase is valid
+    in the input and output contexts.
+
+``abs``
+
+    Specifies that the absolute value of the data should be taken. Valid
+    in all contexts. If specified in the global context, applies to
+    all subsequent input and output contexts. If specified in the input
+    or output contexts, applies only to the data associated with that
+    context.
+
+``pow <*value*>``
+
+    Specifies that the data should be raised to the specified power. For
+    negative values, the absolute value of the data will be taken and
+    the negative sign will be transferred to the result, unless abs is
+    specified. Valid in all contexts. Rules of application are identical
+    to abs.
+
+``sqrt``
+
+    Equivalent to pow 0.5.
+
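+As an example, a minimal description file averaging two subjects with
+equal weights might look like the following sketch (the stc file names
+are hypothetical)::
+
+    # Global parameters
+    tmin 0
+    tstep 5
+    tmax 300
+    # First input subject
+    stc subject_1-meg
+    weight 1
+    # Second input subject
+    stc subject_2-meg
+    weight 1
+    # Output
+    deststc grand_average
+    weight 1
+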
+The effects of the options can be summarized as follows.
+Suppose that the description file includes :math:`P` contexts
+and the temporally resampled data are organized in matrices :math:`S^{(p)}`,
+where :math:`p = 1 \dotso P` is the subject index, and
+the rows are the signals at different vertices of the cortical surface.
+The average computed by mne_average_estimates is
+then:
+
+.. math::    A_{jk} = w\,[\operatorname{sgn}(B_{jk})]^{\alpha}\,|B_{jk}|^{\beta}
+
+with
+
+.. math::    B_{jk} = \sum_{p = 1}^{P} {\bar{w}_p\,[\operatorname{sgn}(S_{jk}^{(p)})]^{\alpha_p}\,|S_{jk}^{(p)}|^{\beta_p}}
+
+and
+
+.. math::    \bar{w}_p = w_p / \sum_{p = 1}^{P} {|w_p|}\ .
+
+In the above, :math:`\beta_p` and :math:`w_p` are
+the powers and weights assigned to each of the subjects whereas :math:`\beta` and :math:`w` are
+the output power and weight, respectively. The sign is either
+included (:math:`\alpha_p = 1`, :math:`\alpha = 1`)
+or omitted (:math:`\alpha_p = 2`, :math:`\alpha = 2`)
+depending on the presence of abs phrases in the description file.
+
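+For example, in the absence of abs and pow phrases (so that
+:math:`\alpha_p = \alpha = 1` and, assuming the default power of one,
+:math:`\beta_p = \beta = 1`), the expressions above reduce to a simple
+weighted average of the inputs:
+
+.. math::    A_{jk} = w \sum_{p = 1}^{P} {\bar{w}_p S_{jk}^{(p)}}
+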
+.. note:: mne_average_estimates requires that the number of vertices in the stc files is the same and that the vertex numbers are identical. This will be the case if the files have been produced in mne_make_movie using the ``--morph`` option.
+
+.. note:: It is straightforward to read and write stc files using the MNE Matlab toolbox described in :ref:`ch_matlab` and thus write custom Matlab functions to realize more complicated group analysis tools.
+
+
+.. _mne_browse_raw:
+
+mne_browse_raw
+==============
+
+``--cd <*dir*>``
+
+    Change to this directory before starting.
+
+``--raw <*name*>``
+
+    Specifies the raw data file to be opened. If a raw data file is not
+    specified, an empty interactive browser will open.
+
+``--grad <*number*>``
+
+    Apply software gradient compensation of the given order to the data loaded
+    with the ``--raw`` option. This option is effective only
+    for data acquired with the CTF and 4D Magnes MEG systems. If orders
+    different from zero are requested for Neuromag data, an error message appears
+    and data are not loaded. Any compensation already existing in the
+    file can be undone or changed to another order by using an appropriate ``--grad`` option.
+    Possible orders are 0 (no compensation), 1 - 3 (CTF data), and 101
+    (Magnes data). This applies only to the data file loaded by specifying the ``--raw`` option.
+    For interactive data loading, the software gradient compensation
+    is specified in the corresponding file selection dialog, see :ref:`CACDCHAJ`.
+
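+For example, third-order gradient compensation could be applied to a
+CTF raw data file (a hypothetical name) with:
+
+``mne_browse_raw --raw ctf_run_raw.fif --grad 3``
+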
+``--filtersize <*size*>``
+
+    Adjust the length of the FFT to be applied in filtering. The number will
+    be rounded up to the next power of two. If the size is :math:`N`,
+    the corresponding length of time is :math:`N/f_s`,
+    where :math:`f_s` is the sampling frequency
+    of your data. The filtering procedure includes overlapping tapers
+    of length :math:`N/2` so that the total FFT
+    length will actually be :math:`2N`. This
+    value cannot be changed after the program has been started.
+
+``--highpass <*value/Hz*>``
+
+    Highpass filter frequency limit. If this is too low with respect
+    to the selected FFT length, the data will not be highpass filtered. It
+    is best to experiment with the interactive version to find the lowest applicable
+    filter for your data. This value can be adjusted in the interactive
+    version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+``--highpassw <*value/Hz*>``
+
+    The width of the transition band of the highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`. This
+    value cannot be adjusted in the interactive version of the program.
+
+``--lowpass <*value/Hz*>``
+
+    Lowpass filter frequency limit. This value can be adjusted in the interactive
+    version of the program. The default is 40 Hz.
+
+``--lowpassw <*value/Hz*>``
+
+    The width of the transition band of the lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+``--eoghighpass <*value/Hz*>``
+
+    Highpass filter frequency limit for EOG. If this is too low with respect
+    to the selected FFT length, the data will not be highpass filtered.
+    It is best to experiment with the interactive version to find the
+    lowest applicable filter for your data. This value can be adjusted in
+    the interactive version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+``--eoghighpassw <*value/Hz*>``
+
+    The width of the transition band of the EOG highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`.
+    This value cannot be adjusted in the interactive version of the
+    program.
+
+``--eoglowpass <*value/Hz*>``
+
+    Lowpass filter frequency limit for EOG. This value can be adjusted in
+    the interactive version of the program. The default is 40 Hz.
+
+``--eoglowpassw <*value/Hz*>``
+
+    The width of the transition band of the EOG lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+``--filteroff``
+
+    Do not filter the data. This initial value can be changed in the
+    interactive version of the program.
+
+``--digtrig <*name*>``
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+``--digtrigmask <*number*>``
+
+    Mask to be applied to the trigger channel values before considering them.
+    This option is useful if one wants to set some bits in a don't care
+    state. For example, some finger response pads keep the trigger lines
+    high if not in use, *i.e.*, a finger is not in
+    place. Yet, it is convenient to keep these devices permanently connected
+    to the acquisition system. The number can be given in decimal or
+    hexadecimal format (beginning with 0x or 0X). For example, the value
+    255 (0xFF) means that only the lowest order byte (usually trigger
+    lines 1 - 8 or bits 0 - 7) will be considered.
+
+``--allowmaxshield``
+
+    Allow loading of unprocessed Elekta-Neuromag data with MaxShield
+    on. These kinds of data should never be used for source localization
+    without further processing with Elekta-Neuromag software.
+
+``--deriv <*name*>``
+
+    Specifies the name of a derivation file. This overrides the use
+    of a standard derivation file, see :ref:`CACFHAFH`.
+
+``--sel <*name*>``
+
+    Specifies the channel selection file to be used. This overrides
+    the use of the standard channel selection files, see :ref:`CACCJEJD`.
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience,    setting the mask to zero or not setting it at all has the same effect    as 0xFFFFFFFF, *i.e.*, all bits set.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME    environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
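+
+As an illustration, a hypothetical invocation combining some of the options
+above might look as follows (the program name and the values shown are
+assumptions made for the sake of the example)::
+
+    mne_browse_raw --lowpass 40 --lowpassw 5 --digtrig STI_014 --digtrigmask 0xFF
+
+Here the underscore in ``STI_014`` is replaced by a space, and the mask 0xFF
+restricts the composite trigger channel to lines 1 - 8.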
+
+
+
+.. _mne_compute_mne:
+
+mne_compute_mne
+===============
+
+This program is gradually becoming obsolete. All of its functions will
+eventually be included in :ref:`mne_make_movie`,
+see :ref:`CBBECEDE`. At this time, :ref:`mne_compute_mne` is
+still needed to produce time-collapsed w files unless you are willing
+to write a Matlab script of your own for this purpose.
+
+
+``--inv <*name*>``
+
+    Load the inverse operator decomposition from here.
+
+``--meas <*name*>``
+
+    Load the MEG or EEG data from this file.
+
+``--set <*number*>``
+
+    The data set (condition) number to load. The list of data sets can
+    be seen, *e.g.*, in mne_analyze, mne_browse_raw,
+    and xplotter.
+
+``--bmin <*time/ms*>``
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present.
+
+``--bmax <*time/ms*>``
+
+    Specifies the finishing time of the baseline.
+
+``--nave <*value*>``
+
+    Specifies the number of averaged epochs in the input data. If the input
+    data file is one produced by mne_process_raw or mne_browse_raw,
+    the number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced, *e.g.*,
+    by using the xplotter software, the
+    number of averages should be manually adjusted. This is accomplished
+    either by employing this flag or by adjusting the number of averages
+    in the data file with the help of mne_change_nave.
+
+``--snr <*value*>``
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda = 1/\text{SNR}`. If the SNR option is
+    absent, the regularization parameter will be estimated from the
+    data. The regularization parameter will then be time dependent.
+
+``--snronly``
+
+    Only estimate SNR and output the result into a file called SNR. Each
+    line of the file contains three values: the time point in ms, the estimated
+    SNR + 1, and the regularization parameter estimated from the data
+    at this time point.
+
+``--abs``
+
+    Calculate the absolute value of the current and the dSPM for fixed-orientation
+    data.
+
+``--spm``
+
+    Calculate the dSPM instead of the expected current value.
+
+``--chi2``
+
+    Calculate an approximate :math:`\chi_3^2` statistic
+    instead of the *F* statistic. This is simply
+    accomplished by multiplying the *F* statistic
+    by three.
+
+``--sqrtF``
+
+    Take the square root of the :math:`\chi_3^2` or *F* statistic
+    before outputting the stc file.
+
+``--collapse``
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the maximum value of the output quantity
+    at this location over the analysis period. This option is convenient
+    for determining the correct thresholds for the rendering of the
+    final brain-activity movies.
+
+``--collapse1``
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the :math:`L_1` norm
+    of the output quantity at this location over the analysis period.
+
+``--collapse2``
+
+    Make all frames in the stc file (or the wfile) identical. The value
+    at each source location is the :math:`L_2` norm
+    of the output quantity at this location over the analysis period.
+
+``--SIcurrents``
+
+    Output true current values in SI units (Am). By default, the currents are
+    scaled so that the maximum current value is set to 50 (Am).
+
+``--out <*name*>``
+
+    Specifies the output file name. This is the 'stem' of
+    the output file name. The actual name is derived by removing anything up
+    to and including the last period from the end of <*name*>.
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. Finally, ``.stc`` or ``.w`` is added,
+    depending on the output file type.
+
+``--wfiles``
+
+    Use binary w-files in the output whenever possible. The noise-normalization
+    factors can always be output in this format. The current estimates
+    and dSPMs can be output as w-files if one of the collapse options
+    is selected.
+
+``--pred <*name*>``
+
+    Save the predicted data into this file. This is a fif file containing
+    the predicted data waveforms, see :ref:`CHDCACDC`.
+
+``--outputnorm <*name*>``
+
+    Output noise-normalization factors to this file.
+
+``--invnorm``
+
+    Output inverse noise-normalization factors to the file defined by
+    the ``--outputnorm`` option.
+
+``--dip <*name*>``
+
+    Specifies a dipole distribution snapshot file. This is a file containing the
+    current distribution at a time specified with the ``--diptime`` option.
+    The file format is the ASCII dip file format produced by the Neuromag
+    source modelling software (xfit). Therefore, the file can be loaded
+    into the Neuromag MRIlab MRI viewer to display the actual current
+    distribution. This option is only effective if the ``--spm`` option
+    is absent.
+
+``--diptime <*time/ms*>``
+
+    Time for the dipole snapshot, see ``--dip`` option above.
+
+``--label <*name*>``
+
+    Label to process. The label files are produced by tksurfer and specify
+    regions of interest (ROIs). A label file name should end with ``-lh.label`` for
+    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
+    ones. The corresponding output files are tagged with ``-lh-`` <*data type*> ``.amp`` and ``-rh-`` <*data type*> ``.amp``, respectively. <*data type*> equals ``MNE`` for expected current
+    data and ``spm`` for dSPM data. Each line of the output
+    file contains the waveform of the output quantity at one of the
+    source locations falling inside the ROI.
+
+.. note:: The ``--tmin`` and ``--tmax`` options which existed in previous versions of mne_compute_mne have been removed. mne_compute_mne can now process only the entire averaged epoch.
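+
+As a minimal sketch (all file names are hypothetical), a time-collapsed dSPM
+could be written out as w files with::
+
+    mne_compute_mne --inv data-meg-inv.fif --meas data-ave.fif --set 1 \
+                    --spm --collapse --wfiles --out data_dspm
+
+With ``--collapse`` and ``--wfiles`` present, the output consists of the
+files ``data_dspm-lh.w`` and ``data_dspm-rh.w``.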
+
+
+.. _mne_compute_raw_inverse:
+
+mne_compute_raw_inverse
+=======================
+
+``--in <*filename*>``
+
+    Specifies the input data file. This can be either an evoked data
+    file or a raw data file.
+
+``--bmin <*time/ms*>``
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present. This option applies to evoked data only.
+
+``--bmax <*time/ms*>``
+
+    Specifies the finishing time of the baseline. This option applies
+    to evoked data only.
+
+``--set <*number*>``
+
+    The data set (condition) number to load. This is the sequential
+    number of the condition. You can easily see the association by looking
+    at the condition list in mne_analyze when
+    you load the file.
+
+``--inv <*name*>``
+
+    Load the inverse operator decomposition from here.
+
+``--nave <*value*>``
+
+    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
+    as discussed in :ref:`CBBDGIAE`. If the input data file is
+    one produced by mne_browse_raw or mne_process_raw,
+    the number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced,
+    e.g., by using the xplotter software,
+    the number of averages should be manually adjusted along the guidelines
+    given in :ref:`CBBDGIAE`. This is accomplished either by
+    employing this flag or by adjusting the number of averages in the
+    data file with the help of the utility mne_change_nave.
+
+``--snr <*value*>``
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda^2 = 1/\text{SNR}^2`. The default value is
+    SNR = 1. Automatic selection of the regularization parameter is
+    currently not supported.
+
+``--spm``
+
+    Calculate the dSPM instead of the expected current value.
+
+``--picknormalcomp``
+
+    The components of the estimates corresponding to directions tangential
+    to the cortical mantle are zeroed out.
+
+``--mricoord``
+
+    Provide source locations and orientations in the MRI coordinate frame
+    instead of the default head coordinate frame.
+
+``--label <*name*>``
+
+    Specifies a label file to process. For each label file, the values
+    of the computed estimates are stored in a fif file. For more details,
+    see :ref:`CBBHJDAI`. The label files are produced by tksurfer
+    or mne_analyze and specify regions
+    of interest (ROIs). A label file name should end with ``-lh.label`` for
+    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
+    ones. The corresponding output files are tagged with ``-lh-`` <*data type*> ``.fif`` and ``-rh-`` <*data type*> ``.fif``, respectively. <*data type*> equals ``mne`` for expected
+    current data and ``spm`` for dSPM data.
+    For raw data, ``_raw.fif`` is employed instead of ``.fif``.
+    The output files are stored in the same directory as the label files.
+
+``--labelselout``
+
+    Produces additional label files for each label processed, containing only
+    those vertices within the input label which correspond to available
+    source space vertices in the inverse operator. These files have the
+    same name as the original label except that ``-lh`` and ``-rh`` are replaced
+    by ``-sel-lh`` and ``-sel-rh``, respectively.
+
+``--align_z``
+
+    Instructs the program to try to align the waveform signs within
+    the label. For more information, see :ref:`CBBHJDAI`. This
+    flag will not have any effect if the inverse operator has been computed
+    with the strict orientation constraint active.
+
+``--labeldir <*directory*>``
+
+    All previous ``--label`` options will be ignored when this
+    option is encountered. For each label in the directory, the output
+    file defined with the ``--out`` option will contain a summarizing
+    waveform which is the average of the waveforms in the vertices of
+    the label. The ``--labeldir`` option implies the ``--align_z`` and ``--picknormalcomp`` options.
+
+``--orignames``
+
+    This option is used with the ``--labeldir`` option, above.
+    With this option, the output file channel names will be the names
+    of the label files, truncated to 15 characters, instead of names
+    containing the vertex numbers.
+
+``--out <*name*>``
+
+    Required with ``--labeldir`` . This is the output file for
+    the data.
+
+``--extra <*name*>``
+
+    By default, the output includes the current estimate signals and
+    the digital trigger channel (see the ``--digtrig`` option
+    below). With the ``--extra`` option, a custom set of additional
+    channels can be included. The extra channel text file should contain
+    the names of these channels, one channel name on each line. With
+    this option present, the digital trigger channel is not included
+    unless specified in the extra channel file.
+
+``--noextra``
+
+    No additional channels will be included with this option present.
+
+``--digtrig <*name*>``
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+``--split <*size/MB*>``
+
+    Specifies the maximum size of the raw data files saved. By default, the
+    output is split into files just below 2 GB so that the
+    fif file maximum size is not exceeded.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_compute_raw_inverse. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
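+
+For example, label waveforms could be extracted from a raw data file with a
+sketch like the following (file and directory names are hypothetical)::
+
+    mne_compute_raw_inverse --in data_raw.fif --inv data-meg-inv.fif \
+                            --labeldir labels --out label_waveforms.fif
+
+Since ``--labeldir`` is present, the ``--align_z`` and ``--picknormalcomp``
+options are implied and ``--out`` is required.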
+
+
+.. _mne_convert_mne_data:
+
+mne_convert_mne_data
+====================
+
+This utility allows the conversion of various fif files related to the MNE
+computations to other formats. The two principal purposes of this utility are
+to facilitate the development of new analysis approaches with Matlab
+and to convert the forward model and noise covariance matrix
+data into evoked-response type fif files, which can be accessed
+and displayed with the Neuromag source modelling software.
+
+.. note:: Most of the functions of mne_convert_mne_data are now covered by the MNE Matlab toolbox described in :ref:`ch_matlab`. This toolbox is recommended to avoid creating additional files occupying disk space.
+
+The command-line options recognized by mne_convert_mne_data are:
+
+``--fwd <*name*>``
+
+    Specify the name of the forward solution file to be converted. Channels
+    specified with the ``--bad`` option will be excluded from
+    the file.
+
+``--fixed``
+
+    Convert the forward solution to the fixed-orientation mode before outputting
+    the converted file. With this option only the field patterns corresponding
+    to a dipole aligned with the estimated cortex surface normal are
+    output.
+
+``--surfsrc``
+
+    When outputting a free-orientation forward model (three orthogonal dipole
+    components present) rotate the dipole coordinate system at each
+    source node so that the two tangential dipole components are output
+    first, followed by the field corresponding to the dipole aligned
+    with the estimated cortex surface normal. The orientation of the
+    first two dipole components in the tangential plane is arbitrarily selected
+    to create an orthogonal coordinate system.
+
+``--noiseonly``
+
+    When creating a 'measurement' fif file, do not
+    output a forward model file, just the noise-covariance matrix.
+
+``--senscov <*name*>``
+
+    Specifies the fif file containing a sensor covariance matrix to
+    be included with the output. If no other input files are specified,
+    only the covariance matrix is output.
+
+``--srccov <*name*>``
+
+    Specifies the fif file containing the source covariance matrix to
+    be included with the output. Only diagonal source covariance files
+    can be handled at the moment.
+
+``--bad <*name*>``
+
+    Specifies the name of the file containing the names of the channels to
+    be omitted, one channel name per line. This does not affect the output
+    of the inverse operator since the channels have been already selected
+    when the file was created.
+
+``--fif``
+
+    Output the forward model and the noise-covariance matrix into 'measurement' fif
+    files. The forward model files are tagged with <*modalities*> ``-meas-fwd.fif`` and
+    the noise-covariance matrix files with <*modalities*> ``-meas-cov.fif`` .
+    Here, modalities is ``-meg`` if MEG is included, ``-eeg`` if
+    EEG is included, and ``-meg-eeg`` if both types of signals
+    are present. The inclusion of modalities is controlled by the ``--meg`` and ``--eeg`` options.
+
+``--mat``
+
+    Output the data into MATLAB mat files. This is the default. The
+    forward model files are tagged with <*modalities*> ``-fwd.mat`` for forward model
+    and noise-covariance matrix output, with ``-inv.mat`` for inverse
+    operator output, and with ``-inv-meas.mat`` for combined inverse
+    operator and measurement data output, respectively. The meaning
+    of <*modalities*> is the same
+    as in the fif output, described above.
+
+``--tag <*name*>``
+
+    By default, all variables in the matlab output files start with
+    ``mne\_``. This option allows this prefix to be changed to <*name*>_.
+
+``--meg``
+
+    Include MEG channels from the forward solution and noise-covariance
+    matrix.
+
+``--eeg``
+
+    Include EEG channels from the forward solution and noise-covariance
+    matrix.
+
+``--inv <*name*>``
+
+    Output the inverse operator data from the specified file into a
+    mat file. The source and noise covariance matrices as well as active channels
+    have been previously selected when the inverse operator was created
+    with mne_inverse_operator. Thus
+    the options ``--meg``, ``--eeg``, ``--senscov``, ``--srccov``, ``--noiseonly``,
+    and ``--bad`` do not affect the output of the inverse operator.
+
+``--meas <*name*>``
+
+    Specifies the file containing measurement data to be output together with
+    the inverse operator. The channels corresponding to the inverse operator
+    are automatically selected from the file if the ``--inv``
+    option is present. Otherwise, the channel selection given with the ``--sel`` option will
+    be taken into account.
+
+``--set <*number*>``
+
+    Select the data set to be output from the measurement file.
+
+``--bmin <*value/ms*>``
+
+    Specifies the baseline minimum value setting for the measurement signal
+    output.
+
+``--bmax <*value/ms*>``
+
+    Specifies the baseline maximum value setting for the measurement signal
+    output.
+
+.. note:: The ``--tmin`` and ``--tmax`` options which existed in previous versions of mne_convert_mne_data have been removed. If output of measurement data is requested, the entire averaged epoch is now included.
+
+Guide to combining options
+--------------------------
+
+The combination of options is quite complicated. The table in :ref:`BEHDCIII` should
+help you determine the combination of options appropriate for your needs.
+
+
+.. tabularcolumns:: |p{0.38\linewidth}|p{0.1\linewidth}|p{0.2\linewidth}|p{0.3\linewidth}|
+.. _BEHDCIII:
+.. table:: Guide to combining mne_convert_mne_data options.
+
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | Desired output                      | Format  | Required options         | Optional options      |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model                       | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    |                                     |         |   \---fif                |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model                       | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | forward model and sensor covariance | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---surfsrc           |
+    |                                     |         |   \---senscov <*name*>   |                       |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance                   | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    |                                     |         |   \---senscov <*name*>   |                       |
+    |                                     |         |   \---noiseonly          |                       |
+    |                                     |         |   \---fif                |                       |
+    |                                     |         |   \---meg and/or \---eeg |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance                   | mat     |   \---senscov <*name*>   | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | sensor covariance eigenvalues       | text    |   \---senscov <*name*>   | \---bad <*name*>      |
+    |                                     |         |   \---out <*name*>       |                       |
+    |                                     |         |   \---eig                |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | evoked MEG/EEG data                 | mat     |   \---meas <*name*>      | \---sel <*name*>      |
+    |                                     |         |   \---out <*name*>       | \---set <*number*>    |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | evoked MEG/EEG data forward model   | mat     |   \---meas <*name*>      | \---bad <*name*>      |
+    |                                     |         |   \---fwd <*name*>       | \---set <*number*>    |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | inverse operator data               | mat     |   \---inv <*name*>       |                       |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
+    | inverse operator data evoked        | mat     |   \---inv <*name*>       |                       |
+    | MEG/EEG data                        |         |   \---meas <*name*>      |                       |
+    |                                     |         |   \---out <*name*>       |                       |
+    +-------------------------------------+---------+--------------------------+-----------------------+
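+
+For example, following the mat-format forward model row of the table above,
+a forward solution could be converted with (file names hypothetical)::
+
+    mne_convert_mne_data --fwd data-meg-fwd.fif --meg --mat --out data_fwd
+
+The ``--mat`` flag could be omitted here since Matlab output is the default.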
+
+Matlab data structures
+----------------------
+
+The Matlab output provided by mne_convert_mne_data is
+organized in structures, listed in :ref:`BEHCICCA`. The fields
+occurring in these structures are listed in :ref:`BABCBIGF`.
+
+
+The symbols employed in variable size descriptions are:
+
+``nloc``
+
+    Number of source locations.
+
+``nsource``
+
+    Number of sources. For fixed-orientation sources nsource = nloc, whereas
+    nsource = 3*nloc for free-orientation sources.
+
+``nchan``
+
+    Number of measurement channels.
+
+``ntime``
+
+    Number of time points in the measurement data.
+
+.. _BEHCICCA:
+.. table:: Matlab structures produced by mne_convert_mne_data.
+
+    ===============  =======================================
+    Structure        Contents
+    ===============  =======================================
+    <*tag*>_meas     Measured data
+    <*tag*>_inv      The inverse operator decomposition
+    <*tag*>_fwd      The forward solution
+    <*tag*>_noise    A standalone noise-covariance matrix
+    ===============  =======================================
+
+The prefix given with the ``--tag`` option is indicated by <*tag*>, see :ref:`mne_convert_mne_data`. Its default value is MNE.
+
+
+.. tabularcolumns:: |p{0.14\linewidth}|p{0.13\linewidth}|p{0.73\linewidth}|
+.. _BABCBIGF:
+.. table:: The fields of Matlab structures.
+
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | fwd                   | nsource x nchan | The forward solution, one source on each row. For free     |
+    |                       |                 | orientation sources, the fields of the three orthogonal    |
+    |                       |                 | dipoles for each location are listed consecutively.        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 3       | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
+    |                       |                 | For EEG electrodes the first unit vector specifies the     |
+    |                       |                 | location of the reference electrode. If the reference is   |
+    |                       |                 | not specified this value is all zeroes. The remaining unit |
+    |                       |                 | vectors are irrelevant for EEG electrodes.                 |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file. The  |
+    |                       |                 | unit of the data is listed in the first column (T = 112,   |
+    |                       |                 | T/m = 201, V = 107). At present, the second column will    |
+    |                       |                 | always be zero, *i.e.*, no unit multiplier.                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | Even if the data comes from the conversion already         |
+    |                       |                 | calibrated, the original calibration factors are included. |
+    |                       |                 | The first column is the range member of the fif data       |
+    |                       |                 | structures while the second is the cal member. To get      |
+    |                       |                 | calibrated values in the units given in ch_units from the  |
+    |                       |                 | raw data, the data must be multiplied with the product of  |
+    |                       |                 | range and cal.                                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_loc            | nloc x 3        | The source locations given in the coordinate frame         |
+    |                       |                 | indicated by the coord_frame member.                       |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_ori            | nsource x 3     | The source orientations                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_selection      | nsource x 2     | Indication of the sources selected from the complete source|
+    |                       |                 | spaces. Each row contains the number of the source in the  |
+    |                       |                 | complete source space (starting with 0) and the source     |
+    |                       |                 | space number (1 or 2). These numbers refer to the order the|
+    |                       |                 | two hemispheres were listed when mne_make_source_space was |
+    |                       |                 | invoked. mne_setup_source_space lists the left hemisphere  |
+    |                       |                 | first.                                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | coord_frame           | string          | Name of the coordinate frame employed in the forward       |
+    |                       |                 | calculations. Possible values are 'head' and 'mri'.        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | mri_head_trans        | 4 x 4           | The coordinate frame transformation from MRI to the MEG    |
+    |                       |                 | 'head' coordinates.                                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_cov             | nchan x nchan   | The noise covariance matrix                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | source_cov            | nsource         | The elements of the diagonal source covariance matrix.     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sing                  | nchan           | The singular values of                                     |
+    |                       |                 | :math:`A = C_0^{-1/2} G R^{1/2} = U \Lambda V^T`           |
+    |                       |                 | with :math:`R` selected so that                            |
+    |                       |                 | :math:`\text{trace}(AA^T) / \text{trace}(I) = 1`           |
+    |                       |                 | as discussed in :ref:`CHDDHAGE`                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | eigen_fields          | nchan x nchan   | The rows of this matrix are the left singular vectors of   |
+    |                       |                 | :math:`A`, i.e., the columns of :math:`U`, see above.      |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | eigen_leads           | nchan x nsource | The rows of this matrix are the right singular vectors of  |
+    |                       |                 | :math:`A`, i.e., the columns of :math:`V`, see above.      |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_eigenval        | nchan           | In terms of :ref:`CHDDHAGE`, eigenvalues of :math:`C_0`,   |
+    |                       |                 | i.e., not scaled with number of averages.                  |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | noise_eigenvec        | nchan           | Eigenvectors of the noise covariance matrix. In terms of   |
+    |                       |                 | :ref:`CHDDHAGE`, :math:`U_C^T`.                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | data                  | nchan x ntime   | The measured data. One row contains the data at one time   |
+    |                       |                 | point.                                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | times                 | ntime           | The time points in the above matrix in seconds             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nave                  | 1               | Number of averages as listed in the data file.             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meas_times            | ntime           | The time points in seconds.                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
+.. note:: The Matlab files can also be read in Python using :py:func:`scipy.io.loadmat`.
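+
+For instance, assuming a hypothetical output file name, the variables can be
+listed from the shell with::
+
+    python -c "from scipy.io import loadmat; print(sorted(loadmat('data_fwd.mat').keys()))"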
+
+
+.. _mne_do_forward_solution:
+
+mne_do_forward_solution
+=======================
+
+This utility accepts the following options:
+
+``--subject <*subject*>``
+
+    Defines the name of the subject. This can also be accomplished
+    by setting the SUBJECT environment variable.
+
+``--src <*name*>``
+
+    Source space name to use. This option overrides the ``--spacing`` option. The
+    source space is searched first from the current working directory
+    and then from ``$SUBJECTS_DIR/`` <*subject*> ``/bem``.
+    The source space file must be specified exactly, including the ``fif`` extension.
+
+``--spacing <*spacing/mm*> or ico-<*number*> or oct-<*number*>``
+
+    This is an alternate way to specify the name of the source space
+    file. For example, if ``--spacing 6`` is given on the command
+    line, the source space files searched for are ./<*subject*>-6-src.fif
+    and $SUBJECTS_DIR/$SUBJECT/bem/<*subject*>-6-src.fif.
+    The first file found is used. Spacing defaults to 7 mm.
+
+``--bem <*name*>``
+
+    Specifies the BEM to be used. The name of the file can be any
+    of <*name*>, <*name*>-bem.fif, or <*name*>-bem-sol.fif.
+    The file is searched for from the current working directory and
+    from ``bem``. If this option is omitted, the most recent
+    BEM file in the ``bem`` directory is used.
+
+``--mri <*name*>``
+
+    The name of the MRI description file containing the MEG/MRI coordinate
+    transformation. This file was saved as part of the alignment procedure
+    outlined in :ref:`CHDBEHDC`. The file is searched for from
+    the current working directory and from ``mri/T1-neuromag/sets`` .
+    The search order for MEG/MRI coordinate transformations is discussed
+    below.
+
+``--trans <*name*>``
+
+    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
+    from head to MRI coordinates, see below. If the option ``--trans`` is
+    present, the ``--mri`` option is not required. The search
+    order for MEG/MRI coordinate transformations is discussed below.
+
+``--meas <*name*>``
+
+    This file is the measurement fif file or an off-line average file
+    produced from it. It is recommended that the average file is employed for
+    evoked-response data and the original raw data file otherwise. This
+    file provides the MEG sensor locations and orientations, the
+    EEG electrode locations, and the coordinate transformation between
+    the MEG device coordinates and MEG head-based coordinates.
+
+``--fwd <*name*>``
+
+    This file will contain the forward solution as well as the coordinate transformations,
+    sensor and electrode location information, and the source space
+    data. A name of the form <*name*> ``-fwd.fif`` is
+    recommended. If this option is omitted the forward solution file
+    name is automatically created from the measurement file name and
+    the source space name.
+
+``--destdir <*directory*>``
+
+    Optionally specifies a directory where the forward solution will
+    be stored.
+
+``--mindist <*dist/mm*>``
+
+    Omit source space points closer than this value to the inner skull surface.
+    Any source space points outside the inner skull surface are automatically
+    omitted. The use of this option ensures that numerical inaccuracies
+    for very superficial sources do not cause unexpected effects in
+    the final current estimates. A suitable value for this parameter is
+    of the order of the size of the triangles on the inner skull surface.
+    If you employ the seglab software
+    to create the triangulations, this value should be about equal to
+    the desired side length of the triangles.
+
+``--megonly``
+
+    Omit EEG forward calculations.
+
+``--eegonly``
+
+    Omit MEG forward calculations.
+
+``--all``
+
+    Compute the forward solution for all vertices on the source space.
+
+``--overwrite``
+
+    Overwrite the possibly existing forward model file.
+
+``--help``
+
+    Show usage information for the script.
+
+The MEG/MRI transformation is determined by the following
+search sequence:
+
+- If the ``--mri`` option was
+  present, the file is looked for literally as specified, in the directory
+  of the measurement file specified with the ``--meas`` option,
+  and in the directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets.
+  If the file is not found, the script exits with an error message.
+
+- If the ``--trans`` option was present, the file is
+  looked up literally as specified. If the file is not found, the
+  script exits with an error message.
+
+- If neither the ``--mri`` nor the ``--trans`` option
+  was present, the following default search sequence is engaged:
+
+  - The ``.fif`` ending in the
+    measurement file name is replaced by ``-trans.fif``. If
+    this file is present, it will be used.
+
+  - The newest file whose name ends with ``-trans.fif`` in
+    the directory of the measurement file is looked up. If such a file
+    is present, it will be used.
+
+  - The newest file whose name starts with ``COR-`` in
+    directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets is looked
+    up. If such a file is present, it will be used.
+
+  - If all the above searches fail, the script exits with an error
+    message.
+
+This search sequence is designed to work well with the MEG/MRI
+transformation files output by mne_analyze,
+see :ref:`CACEHGCD`. It is recommended that the -trans.fif files
+saved with the Save default and Save... options in
+the mne_analyze alignment dialog
+are used because then the $SUBJECTS_DIR/$SUBJECT directory will
+be composed of files which depend only on the subject's
+anatomy, not on the MEG/EEG data to be analyzed.
+
+.. note:: If the standard MRI description file and BEM file selections and the default 7-mm source space grid spacing are appropriate, only the ``--meas`` option is necessary. If EEG data is not used, the ``--megonly`` option should be included.
+
+.. note:: If it is conceivable that the current-density transformation will be incorporated into the inverse operator, specify a source space with patch information for the forward computation. This is not mandatory but saves a lot of time when the inverse operator is created, since the patch information does not need to be created at that stage.
+
+.. note:: The MEG head to MRI transformation matrix specified with the ``--trans`` option should be a text file containing a 4-by-4 matrix:
+
+.. math::    T = \begin{bmatrix}
+		R_{11} & R_{12} & R_{13} & x_0 \\
+		R_{21} & R_{22} & R_{23} & y_0 \\
+		R_{31} & R_{32} & R_{33} & z_0 \\
+		0 & 0 & 0 & 1
+		\end{bmatrix}
+
+defined so that if the augmented location vectors in the MEG
+head and MRI coordinate systems are denoted by
+:math:`r_{head} = [x_{head}\ y_{head}\ z_{head}\ 1]^T` and
+:math:`r_{MRI} = [x_{MRI}\ y_{MRI}\ z_{MRI}\ 1]^T`, respectively, then
+
+.. math::    r_{MRI} = T r_{head}
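+
+For instance, a ``--trans`` file specifying the identity transformation (the
+same mapping that the ``--notrans`` option of :ref:`mne_forward_solution`
+assumes) could simply read::
+
+    1 0 0 0
+    0 1 0 0
+    0 0 1 0
+    0 0 0 1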
+
+.. note:: It is not possible to calculate an EEG forward solution with a single-layer BEM.
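+
+As the first note above points out, a minimal MEG-only run needs little more
+than the measurement file; a sketch (the file name is hypothetical)::
+
+    mne_do_forward_solution --meas data-ave.fif --megonly --overwrite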
+
+
+.. _mne_do_inverse_operator:
+
+mne_do_inverse_operator
+=======================
+
+``--fwd <*name of the forward solution file*>``
+
+    This is the forward solution file produced in the computation step described
+    in :ref:`BABCHEJD`.
+
+``--meg``
+
+    Employ MEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
+    set, only MEG channels are included.
+
+``--eeg``
+
+    Employ EEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
+    set, only MEG channels are included.
+
+``--fixed``
+
+    Use fixed source orientations normal to the cortical mantle. By default,
+    the source orientations are not constrained. If ``--fixed`` is specified,
+    the ``--loose`` flag is ignored.
+
+``--loose <*amount*>``
+
+    Use a 'loose' orientation constraint. This means
+    that the source covariance matrix entries corresponding to the current
+    component normal to the cortex are set equal to one and the transverse
+    components are set to <*amount*>.
+    The recommended value for <*amount*> is 0.1...0.6.
+
+``--depth``
+
+    Employ depth weighting with the standard settings. For details,
+    see :ref:`CBBDFJIE` and :ref:`CBBDDBGF`.
+
+``--bad <*name*>``
+
+    Specifies a text file to designate bad channels, listed one channel name
+    (like MEG 1933) on each line of the file. Be sure to include both
+    noisy and flat (non-functioning) channels in the list. If bad channels
+    were designated using mne_mark_bad_channels in
+    the measurement file which was specified with the ``--meas`` option when
+    the forward solution was computed, the bad channel information will
+    be automatically included. Also, any bad channel information in
+    the noise-covariance matrix file will be included.
+
+``--noisecov <*name*>``
+
+    Name of the noise-covariance matrix file computed with one of the methods
+    described in :ref:`BABDEEEB`. By default, the script looks
+    for a file whose name is derived from the forward solution file
+    by replacing its ending ``-`` <*anything*> ``-fwd.fif`` by ``-cov.fif``.
+    If this file contains a projection operator, which will be automatically
+    attached to the noise-covariance matrix by mne_browse_raw and mne_process_raw,
+    no ``--proj`` option is necessary because mne_inverse_operator will
+    automatically include the projectors from the noise-covariance matrix
+    file. For backward compatibility, ``--senscov`` can be used as a synonym
+    for ``--noisecov``.
+
+``--noiserank <*value*>``
+
+    Specifies the rank of the noise covariance matrix explicitly rather than
+    trying to reduce it automatically. This option is seldom needed.
+
+``--megreg <*value*>``
+
+    Regularize the MEG part of the noise-covariance matrix by this amount.
+    Suitable values are in the range 0.05...0.2. For details, see :ref:`CBBHEGAB`.
+
+``--eegreg <*value*>``
+
+    Like ``--megreg`` but applies to the EEG channels.
+
+``--diagnoise``
+
+    Omit the off-diagonal terms of the noise covariance matrix. This option
+    is irrelevant to most users.
+
+``--fmri <*name*>``
+
+    With the help of this w file, an *a priori* weighting
+    can be applied to the source covariance matrix. The source of the weighting
+    is usually fMRI but may also be some other data, provided that the weighting can
+    be expressed as a scalar value on the cortical surface, stored in
+    a w file. It is recommended that this w file is appropriately smoothed (see :ref:`CHDEBAHH`)
+    in mne_analyze, tksurfer or
+    with mne_smooth_w to contain
+    nonzero values at all vertices of the triangular tessellation of
+    the cortical surface. The name of the file given is used as a stem of
+    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
+    the left and right hemisphere weight files, respectively. The application
+    of the weighting is discussed in :ref:`CBBDIJHI`.
+
+``--fmrithresh <*value*>``
+
+    This option is mandatory when a weighting function
+    has been specified with the ``--fmri`` option and is ignored without it. If the value
+    in the *a priori* files falls below this value
+    at a particular source space point, the source covariance matrix
+    values are multiplied by the value specified with the ``--fmrioff`` option
+    (default 0.1). Otherwise it is left unchanged.
+
+``--fmrioff <*value*>``
+
+    The value by which the source covariance elements are multiplied
+    if the *a priori* weight falls below the threshold
+    set with ``--fmrithresh`` , see above.
+
+``--srccov <*name*>``
+
+    Use this diagonal source covariance matrix. By default the source covariance
+    matrix is a multiple of the identity matrix. This option is irrelevant
+    to most users.
+
+``--proj <*name*>``
+
+    Include signal-space projection information from this file.
+
+``--inv <*name*>``
+
+    Save the inverse operator decomposition here. By default, the script looks
+    for a file whose name is derived from the forward solution file by
+    replacing its ending ``-fwd.fif`` by <*options*> ``-inv.fif`` , where
+    <*options*> includes options ``--meg``, ``--eeg``, and ``--fixed`` with the double
+    dashes replaced by single ones.
+
+``--destdir <*directory*>``
+
+    Optionally specifies a directory where the inverse operator will
+    be stored.
+
+.. note:: If bad channels are included in the calculation, strange results may ensue. Therefore, it is recommended that the data to be analyzed is carefully inspected beforehand to assign the bad channels correctly.
+
+.. note:: For convenience, the MNE software includes bad-channel designation files which can be used to ignore all magnetometer or all gradiometer channels in Vectorview measurements. These files are called ``vv_grad_only.bad`` and ``vv_mag_only.bad``, respectively. Both files are located in ``$MNE_ROOT/share/mne/templates``.
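+
+A minimal sketch (the forward solution file name is hypothetical; the
+noise-covariance file is located automatically as described under
+``--noisecov``)::
+
+    mne_do_inverse_operator --fwd data-meg-fwd.fif --meg --depth --loose 0.2 --megreg 0.1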
+
+
+.. _mne_forward_solution:
+
+mne_forward_solution
+====================
+
+``--src <*name*>``
+
+    Source space name to use. The name of the file must be specified exactly,
+    including the directory. Typically, the source space files reside
+    in $SUBJECTS_DIR/$SUBJECT/bem.
+
+``--bem <*name*>``
+
+    Specifies the BEM to be used. These files end with bem.fif or bem-sol.fif and
+    reside in $SUBJECTS_DIR/$SUBJECT/bem. The former file contains only
+    the BEM surface information while the latter files contain the geometry
+    information precomputed with :ref:`mne_prepare_bem_model`,
+    see :ref:`CHDJFHEB`. If precomputed geometry is not available,
+    the linear collocation solution will be computed by mne_forward_solution .
+
+``--origin <*x/mm*> : <*y/mm*> : <*z/mm*>``
+
+    Indicates that the sphere model should be used in the forward calculations.
+    The origin is specified in MEG head coordinates unless the ``--mricoord`` option
+    is present. The MEG sphere model solution is computed using the analytical
+    Sarvas formula. For EEG, an approximative sphere model solution is used.
+
+``--eegmodels <*name*>``
+
+    This option is significant only if the sphere model is used and
+    EEG channels are present. The specified file contains specifications
+    of the EEG sphere model layer structures as detailed in :ref:`CHDIAFIG`.
+    If this option is absent, the file ``$HOME/.mne/EEG_models`` will
+    be consulted if it exists.
+
+``--eegmodel <*model name*>``
+
+    Specifies the name of the sphere model to be used for EEG. If this option
+    is missing, the model Default will
+    be employed, see :ref:`CHDIAFIG`.
+
+``--eegrad <*radius/mm*>``
+
+    Specifies the radius of the outermost surface (scalp) of the EEG sphere
+    model, see :ref:`CHDIAFIG`. The default value is 90 mm.
+
+``--eegscalp``
+
+    Scale the EEG electrode locations to the surface of the outermost sphere
+    when using the sphere model.
+
+``--accurate``
+
+    Use accurate MEG sensor coil descriptions. This is the recommended
+    choice.
+
+``--fixed``
+
+    Compute the solution for sources normal to the cortical mantle only. This
+    option should be used only for surface-based and discrete source
+    spaces.
+
+``--all``
+
+    Compute the forward solution for all vertices on the source space.
+
+``--label <*name*>``
+
+    Compute the solution only for points within the specified label. Multiple
+    labels can be present. The label files should end with ``-lh.label`` or ``-rh.label`` for
+    left and right hemisphere label files, respectively. If the ``--all`` flag
+    is present, all surface points falling within the labels are included.
+    Otherwise, only decimated points within the label are selected.
+
+``--mindist <*dist/mm*>``
+
+    Omit source space points closer than this value to the inner skull surface.
+    Any source space points outside the inner skull surface are automatically
+    omitted. The use of this option ensures that numerical inaccuracies
+    for very superficial sources do not cause unexpected effects in
+    the final current estimates. A suitable value for this parameter is
+    of the order of the size of the triangles on the inner skull surface.
+    If you employ the seglab software to create the triangulations, this
+    value should be about equal to the desired side length of the
+    triangles.
+
+``--mindistout <*name*>``
+
+    Specifies a file name to contain the coordinates of source space points
+    omitted due to the ``--mindist`` option.
+
+``--mri <*name*>``
+
+    The name of the MRI description file containing the MEG/MRI coordinate
+    transformation. This file was saved as part of the alignment procedure
+    outlined in :ref:`CHDBEHDC`. These files typically reside in ``$SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets`` .
+
+``--trans <*name*>``
+
+    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
+    from head to MRI coordinates. With ``--trans``, the ``--mri`` option is not
+    required.
+
+``--notrans``
+
+    The MEG/MRI coordinate transformation is taken as the identity transformation, *i.e.*,
+    the two coordinate systems are the same. This option is useful only
+    in special circumstances. If more than one of the ``--mri`` , ``--trans`` ,
+    and ``--notrans`` options are specified, the last one remains
+    in effect.
+
+``--mricoord``
+
+    Do all computations in the MRI coordinate system. The forward solution
+    matrix is not affected by this option if the source orientations
+    are fixed to be normal to the cortical mantle. If all three source components
+    are included, the forward solution for the three source orientations parallel to
+    the coordinate axes is computed. If ``--mricoord`` is present, these
+    axes correspond to the MRI coordinate system rather than the default
+    MEG head coordinate system. This option is useful only in special
+    circumstances.
+
+``--meas <*name*>``
+
+    This file is the measurement fif file or an off-line average file
+    produced from it. It is recommended that the average file is employed for
+    evoked-response data and the original raw data file otherwise. This
+    file provides the MEG sensor locations and orientations, the
+    EEG electrode locations, and the coordinate transformation between
+    the MEG device coordinates and MEG head-based coordinates.
+
+``--fwd <*name*>``
+
+    This file will contain the forward solution as well as the coordinate transformations,
+    sensor and electrode location information, and the source space
+    data. A name of the form <*name*>-fwd.fif is
+    recommended.
+
+``--meg``
+
+    Compute the MEG forward solution.
+
+``--eeg``
+
+    Compute the EEG forward solution.
+
+``--grad``
+
+    Include the derivatives of the fields with respect to the dipole
+    position coordinates to the output, see :ref:`BJEFEJJG`.
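+
+A hypothetical invocation bringing these pieces together (all file names are
+illustrative only)::
+
+    mne_forward_solution --src data-6-src.fif --bem data-bem-sol.fif \
+        --trans head-mri-trans.txt --meas data-ave.fif \
+        --meg --eeg --accurate --fwd data-fwd.fif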
+
+
+.. _mne_inverse_operator:
+
+mne_inverse_operator
+====================
+
+``--meg``
+
+    Employ MEG data in the calculation of the estimates.
+
+``--eeg``
+
+    Employ EEG data in the calculation of the estimates. Note: The EEG
+    computations have not been thoroughly tested at this time.
+
+``--fixed``
+
+    Use fixed source orientations normal to the cortical mantle. By default,
+    the source orientations are not constrained.
+
+``--loose <amount>``
+
+    Employ a loose orientation constraint (LOC). This means that the source
+    covariance matrix entries corresponding to the current component
+    normal to the cortex are set equal to one and the transverse components
+    are set to <*amount*>. The recommended
+    value for <*amount*> is 0.2...0.6.
+
+``--loosevar <amount>``
+
+    Use an adaptive loose orientation constraint. This option can only be
+    employed if the source spaces included in the forward solution
+    have the patch information computed, see :ref:`CIHCHDAE`.
+
+``--fwd <name>``
+
+    Specifies the name of the forward solution to use.
+
+``--noisecov <name>``
+
+    Specifies the name of the noise-covariance matrix to use. If this
+    file contains a projection operator, attached by :ref:`mne_browse_raw` and :ref:`mne_process_raw`,
+    no additional projection vectors can be added with the ``--proj`` option. For
+    backward compatibility, ``--senscov`` can be used as a synonym for ``--noisecov``.
+
+``--noiserank <value>``
+
+    Specifies the rank of the noise covariance matrix explicitly rather than
+    trying to reduce it automatically. This option is seldom needed.
+
+``--gradreg <value>``
+
+    Regularize the planar gradiometer section (channels for which the unit
+    of measurement is T/m) of the noise-covariance matrix by the given
+    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
+
+``--magreg <value>``
+
+    Regularize the magnetometer and axial gradiometer section (channels
+    for which the unit of measurement is T) of the noise-covariance matrix
+    by the given amount. The value is restricted to the range 0...1.
+    For details, see :ref:`CBBHEGAB`.
+
+``--eegreg <value>``
+
+    Regularize the EEG section of the noise-covariance matrix by the given
+    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
+
+``--diagnoise``
+
+    Omit the off-diagonal terms from the noise-covariance matrix in
+    the computations. This may be useful if the amount of signal-free
+    data has been insufficient to calculate a reliable estimate of the
+    full noise-covariance matrix.
+
+``--srccov <name>``
+
+    Specifies the name of the diagonal source-covariance matrix to use.
+    By default the source covariance matrix is a multiple of the identity matrix.
+    This option can be employed to incorporate the fMRI constraint.
+    The software to create a source-covariance matrix file from fMRI
+    data will be provided in a future release of this software package.
+
+``--depth``
+
+    Employ depth weighting. For details, see :ref:`CBBDFJIE`.
+
+``--weightexp <value>``
+
+    This parameter determines the steepness of the depth weighting function
+    (default = 0.8). For details, see :ref:`CBBDFJIE`.
+
+``--weightlimit <value>``
+
+    Maximum relative strength of the depth weighting (default = 10). For
+    details, see :ref:`CBBDFJIE`.
+
+``--fmri <name>``
+
+    With help of this w file, an *a priori* weighting
+    can be applied to the source covariance matrix. The source of the
+    weighting is usually fMRI but may be also some other data, provided
+    that the weighting  can be expressed as a scalar value on the cortical
+    surface, stored in a w file. It is recommended that this w file
+    is appropriately smoothed (see :ref:`CHDEBAHH`) in mne_analyze,
+    tksurfer, or with mne_smooth_w so that it contains
+    nonzero values at all vertices of the triangular tessellation of
+    the cortical surface. The name of the file given is used as a stem of
+    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
+    the left and right hemisphere weight files, respectively. The application
+    of the weighting is discussed in :ref:`CBBDIJHI`.
+
+``--fmrithresh <value>``
+
+    This option is required whenever a weighting function has been specified
+    with the ``--fmri`` option and has no effect otherwise. If the value
+    in the *a priori* files falls below this threshold
+    at a particular source space point, the source covariance matrix
+    values are multiplied by the value specified with the ``--fmrioff`` option
+    (default 0.1). Otherwise they are left unchanged.
+
+``--fmrioff <value>``
+
+    The value by which the source covariance elements are multiplied
+    if the *a priori* weight falls below the threshold
+    set with ``--fmrithresh`` , see above.
+
+``--bad <name>``
+
+    A text file to designate bad channels, listed one channel name on each
+    line of the file. If the noise-covariance matrix specified with the ``--noisecov`` option
+    contains projections, bad channel lists can be included only if
+    they specify all channels containing non-zero entries in a projection
+    vector. For example, bad channels can usually specify all magnetometers
+    or all gradiometers since the projection vectors for these channel
+    types are completely separate. Similarly, it is possible to include
+    MEG data only or EEG data only by using only one of ``--meg`` or ``--eeg`` options
+    since the projection vectors for MEG and EEG are always separate.
+
+``--surfsrc``
+
+    Use a source coordinate system based on the local surface orientation
+    at the source location. By default, the three dipole components are
+    pointing to the directions of the x, y, and z axis of the coordinate system
+    employed in the forward calculation (usually the MEG head coordinate
+    frame). This option changes the orientation so that the first two
+    source components lie in the plane normal to the surface normal
+    at the source location and the third component is aligned with it.
+    If patch information is available in the source space, the normal
+    is the average patch normal, otherwise the vertex normal at the source
+    location is used. If the ``--loose`` or ``--loosevar`` option
+    is employed, ``--surfsrc`` is implied.
+
+``--exclude <name>``
+
+    Exclude the source space points defined by the given FreeSurfer 'label' file
+    from the source reconstruction. This is accomplished by setting
+    the corresponding entries in the source-covariance matrix equal
+    to zero. The name of the file should end with ``-lh.label``
+    if it refers to the left hemisphere and with ``-rh.label`` if
+    it lists points in the right hemisphere, respectively.
+
+``--proj <name>``
+
+    Include signal-space projection (SSP) information from this file. For information
+    on SSP, see :ref:`CACCHABI`. If the projections are present in
+    the noise-covariance matrix, the ``--proj`` option is
+    not allowed.
+
+``--csd``
+
+    Compute the inverse operator for surface current densities instead
+    of the dipole source amplitudes. This requires the computation of patch
+    statistics for the source space. Since this computation is time consuming,
+    it is recommended that the patch statistics are precomputed and
+    the source space file containing the patch information is employed
+    already when the forward solution is computed, see :ref:`CIHCHDAE` and :ref:`BABCHEJD`.
+    For technical details of the patch information, please consult :ref:`CBBDBHDI`. This option is considered experimental at
+    the moment.
+
+``--inv <name>``
+
+    Save the inverse operator decomposition here.
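+
+As a minimal sketch, a typical invocation might combine the options above
+as follows (all file names here are hypothetical)::
+
+    mne_inverse_operator --meg --loose 0.2 --depth \
+        --fwd sample-fwd.fif --noisecov sample-cov.fif \
+        --inv sample-meg-inv.fif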
+
+
+.. _mne_make_movie:
+
+mne_make_movie
+==============
+
+Input files
+-----------
+
+``--inv <*name*>``
+
+    Load the inverse operator decomposition from here.
+
+``--meas <*name*>``
+
+    Load the MEG or EEG data from this file.
+
+``--set <*number*>``
+
+    The data set (condition) number to load. This is the sequential
+    number of the condition. You can easily see the association by looking
+    at the condition list in mne_analyze when
+    you load the file.
+
+``--stcin <*name*>``
+
+    Specifies an stc file to read as input.
+
+Times and baseline
+------------------
+
+``--tmin <*time/ms*>``
+
+    Specifies the starting time employed in the analysis. If ``--tmin`` option
+    is missing the analysis starts from the beginning of the epoch.
+
+``--tmax <*time/ms*>``
+
+    Specifies the finishing time employed in the analysis. If ``--tmax`` option
+    is missing the analysis extends to the end of the epoch.
+
+``--tstep <*step/ms*>``
+
+    Time step between consecutive movie frames, specified in milliseconds.
+
+``--integ <*Δt/ms*>``
+
+    Integration time for each frame. Defaults to zero. The integration will
+    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
+    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
+
+``--pick <*time/ms*>``
+
+    Pick a time for the production of rgb, tif, jpg, png, or w files.
+    Several pick options may be present. The time must be within the
+    analysis interval, indicated by the ``--tmin`` and ``--tmax`` options.
+    The ``--rgb`` , ``--tif`` , ``--jpg`` , ``--png`` , and ``--w`` options
+    control which file types are actually produced. When a ``--pick`` option
+    is encountered, the effect of any preceding ``--pickrange`` option
+    is ignored.
+
+``--pickrange``
+
+    All previous ``--pick`` options will be ignored. Instead,
+    snapshots are produced as indicated by the ``--tmin`` , ``--tmax`` ,
+    and ``--tstep`` options. This is useful, *e.g.*,
+    for producing input for scripts merging the individual graphics
+    snapshots into a composite "filmstrip" representation.
+    However, such scripts are not yet part of the MNE software.
+
+``--bmin <*time/ms*>``
+
+    Specifies the starting time of the baseline. In order to activate
+    baseline correction, both ``--bmin`` and ``--bmax`` options
+    must be present.
+
+``--bmax <*time/ms*>``
+
+    Specifies the finishing time of the baseline.
+
+``--baselines <*file_name*>``
+
+    Specifies a file which contains the baseline settings. Each line
+    of the file should contain a name of a channel, followed by the
+    baseline value, separated from the channel name by a colon. The
+    baseline values must be specified in basic units, i.e., Teslas/meter
+    for gradiometers, Teslas for magnetometers, and Volts for EEG channels.
+    If some channels are missing from the baseline file, warning messages are
+    issued: for these channels, the ``--bmin`` and ``--bmax`` settings will
+    be used.
+
+Options controlling the estimates
+---------------------------------
+
+``--nave <*value*>``
+
+    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
+    as discussed in :ref:`CBBDGIAE`. If the input data file is
+    one produced by :ref:`mne_browse_raw` or :ref:`mne_process_raw`, the
+    number of averages is correct in the file. However, if subtractions
+    or some more complicated combinations of simple averages are produced,
+    e.g., by using the xplotter software,
+    the number of averages should be manually adjusted along the guidelines
+    given in :ref:`CBBDGIAE`. This is accomplished either by
+    employing this flag or by adjusting the number of averages in the
+    data file with help of the :ref:`mne_change_nave` utility.
+
+``--snr <*value*>``
+
+    An estimate for the amplitude SNR. The regularization parameter will
+    be set as :math:`\lambda^2 = 1/SNR^2`. The default value is
+    SNR = 3. Automatic selection of the regularization parameter is
+    currently not supported.
+
+``--spm``
+
+    Calculate the dSPM instead of the expected current value.
+
+``--sLORETA``
+
+    Calculate the noise-normalized estimate using the sLORETA approach.
+    sLORETA solutions have in general a smaller location bias than either
+    the expected current (MNE) or the dSPM.
+
+``--signed``
+
+    Indicate the current direction with respect to the cortex outer
+    normal by sign. Currents flowing out of the cortex are thus considered
+    positive (warm colors) and currents flowing into the cortex negative (cold
+    colors).
+
+``--picknormalcomp``
+
+    The components of the estimates corresponding to directions tangential
+    to the cortical mantle are zeroed out.
+
+.. _CBBBBHIF:
+
+Visualization options
+---------------------
+
+``--subject <*subject*>``
+
+    Specifies the subject whose MRI data is employed in the visualization.
+    This must be the same subject that was used for computing the current
+    estimates. The environment variable SUBJECTS_DIR must be set to
+    point to the location where the subjects are to be found.
+
+``--morph <*subject*>``
+
+    Morph the data to the cortical surface of another subject. The QuickTime
+    movie, stc-file, graphics snapshot, and w-file outputs are affected
+    by this option, *i.e.*, they will take the morphing
+    into account and will represent the data on the cortical surface
+    of the subject defined with this option. The stc files morphed to
+    a single subject's cortical surface are used by mne_average_estimates to
+    combine data from different subjects.
+    If morphing is selected appropriate smoothing must be specified
+    with the ``--smooth`` option. The morphing process can
+    be made faster by precomputing the necessary morphing maps with mne_make_morph_maps ,
+    see :ref:`CHDBBHDH`. More information about morphing and averaging
+    can be found in :ref:`ch_morph`.
+
+``--morphgrade <*number*>``
+
+    Adjusts the number of vertices in the stc files produced when morphing
+    is in effect. By default the number of vertices is 10242 corresponding
+    to ``--morphgrade`` value 5. Allowed values are 3, 4, 5, and 6 corresponding
+    to 642, 2562, 10242, and 40962 vertices, respectively.
+
+``--surface <*surface name*>``
+
+    Name of the surface employed in the visualization. The default is ``inflated`` .
+
+``--curv <*name*>``
+
+    Specify a nonstandard curvature file name. The default curvature files
+    are ``lh.curv`` and ``rh.curv`` . With this option,
+    the names become ``lh.`` <*name*> and ``rh.`` <*name*> .
+
+``--patch <*name*> [: <*angle/deg*> ]``
+
+    Specify the name of a surface patch to be used for visualization instead
+    of the complete cortical surface. A complete name of a patch file
+    in the FreeSurfer surf directory must be given. The name should
+    begin with lh or rh to allow association of the patch with a hemisphere.
+    A maximum of two ``--patch`` options can be in effect, one patch for each
+    hemisphere. If the name refers to a flat patch, the name can be
+    optionally followed by a colon and a rotation angle in degrees.
+    The flat patch will be then rotated counterclockwise by this amount
+    before display. You can check a suitable value for the rotation
+    angle by loading the patch interactively in mne_analyze .
+
+``--width <*value*>``
+
+    Width of the graphics output frames in pixels. The default width
+    is 600 pixels.
+
+``--height <*value*>``
+
+    Height of the graphics output frames in pixels. The default height
+    is 400 pixels.
+
+``--mag <*factor*>``
+
+    Magnify the visualized scene by this factor.
+
+``--lh``
+
+    Select the left hemisphere for graphics output. By default, both hemispheres
+    are processed.
+
+``--rh``
+
+    Select the right hemisphere for graphics output. By default, both hemispheres
+    are processed.
+
+``--view <*name*>``
+
+    Select the name of the view for mov, rgb, and tif graphics output files.
+    The default viewnames, defined in ``$MNE_ROOT/share/mne/mne_analyze/eyes`` ,
+    are *lat* (lateral), *med* (medial), *ven* (ventral),
+    and *occ* (occipital). You can override these
+    defaults by creating the directory .mne under your home directory
+    and copying the eyes file there. Each line of the eyes file contains
+    the name of the view, the viewpoint for the left hemisphere, the
+    viewpoint for the right hemisphere, left hemisphere up vector, and
+    right hemisphere up vector. The entities are separated by semicolons.
+    Lines beginning with the pound sign (#) are considered to be comments.
+
+``--smooth <*nstep*>``
+
+    Number of smoothsteps to take when producing the output frames. Depending
+    on the source space decimation, an appropriate number is 4 - 7.
+    Smoothing does not have any effect for the original brain if stc
+    files are produced. However, if morphing is selected smoothing is
+    mandatory even with stc output. For details of the smoothing procedure,
+    see :ref:`CHDEBAHH`.
+
+``--nocomments``
+
+    Do not include the comments in the image output files or movies.
+
+``--noscalebar``
+
+    Do not include the scalebar in the image output files or movies.
+
+``--alpha <*value*>``
+
+    Adjust the opacity of maps shown on the cortical surface (0 = transparent,
+    1 = totally opaque). The default value is 1.
+
+Thresholding
+------------
+
+``--fthresh <*value*>``
+
+    Specifies the threshold for the displayed colormaps. At the threshold,
+    the overlayed color will be equal to the background surface color.
+    For currents, the value will be multiplied by :math:`10^{-10}`.
+    The default value is 8.
+
+``--fmid <*value*>``
+
+    Specifies the midpoint for the displayed colormaps. At this value, the
+    overlayed color will be red (positive values) or blue (negative values).
+    For currents, the value will be multiplied by :math:`10^{-10}`.
+    The default value is 15.
+
+``--fmax <*value*>``
+
+    Specifies the maximum point for the displayed colormaps. At this value,
+    the overlayed color will be bright yellow (positive values) or light
+    blue (negative values). For currents, the value will be multiplied
+    by :math:`10^{-10}`. The default value is 20.
+
+``--fslope <*value*>``
+
+    Included for backwards compatibility. If this option is specified
+    and ``--fmax`` option is *not* specified, :math:`F_{max} = F_{mid} + 1/F_{slope}`.
+
+Output files
+------------
+
+``--mov <*name*>``
+
+    Produce QuickTime movie files. This is the 'stem' of
+    the output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.mov`` is added to indicate a QuickTime output
+    file. The movie is produced for all times as dictated by the ``--tmin`` , ``--tmax`` , ``--tstep`` ,
+    and ``--integ`` options.
+
+``--qual <*value*>``
+
+    Quality of the QuickTime movie output. The default quality is 80 and
+    the allowed range is 25 - 100. The size of the movie files is a monotonically
+    increasing function of the movie quality.
+
+``--rate <*rate*>``
+
+    Specifies the frame rate of the QuickTime movies. The default value is :math:`1/(10t_{step})`,
+    where :math:`t_{step}` is the time between subsequent
+    movie frames produced in seconds.
+
+``--rgb <*name*>``
+
+    Produce rgb snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.rgb`` is added to indicate an rgb output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+``--tif <*name*>``
+
+    Produce tif snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.tif`` is added to indicate a tif output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+    The tif output files are *not* compressed. Pass
+    the files through an image processing program to compress them.
+
+``--jpg <*name*>``
+
+    Produce jpg snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.jpg`` is added to indicate a jpg output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+``--png <*name*>``
+
+    Produce png snapshots. This is the 'stem' of the
+    output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh`` or ``-rh`` is
+    then appended. The name of the view is indicated with ``-`` <*viewname*> .
+    Finally, ``.png`` is added to indicate a png output file.
+    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
+
+``--w <*name*>``
+
+    Produce w file snapshots. This is the 'stem' of
+    the output file name. The actual name is derived by stripping anything
+    up to and including the last period from the end of <*name*> .
+    According to the hemisphere, ``-lh.w`` or ``-rh.w``
+    is then appended. Files are produced for all picked times as dictated
+    by the ``--pick`` and ``--integ`` options.
+
+``--stc <*name*>``
+
+    Produce stc files for either the original subject or the one selected with
+    the ``--morph`` option. These files will contain data only
+    for the decimated locations. If morphing is selected, appropriate
+    smoothing is mandatory. The morphed maps will be decimated with
+    help of a subdivided icosahedron so that the morphed stc files will
+    always contain 10242 vertices. These morphed stc files can be easily
+    averaged together, e.g., in Matlab since they always contain an
+    identical set of vertices.
+
+``--norm <*name*>``
+
+    Indicates that a separate w file
+    containing the noise-normalization values will be produced. The
+    option ``--spm`` must also be present. Nevertheless, the
+    movies and stc files output will
+    contain MNE values. The noise normalization data files will be called <*name*>- <*SNR*> ``-lh.w`` and <*name*>- <*SNR*> ``-rh.w`` .
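+
+As an illustrative sketch, a dSPM movie of the lateral view of both
+hemispheres could be produced as follows (file and subject names are
+hypothetical)::
+
+    mne_make_movie --inv sample-meg-inv.fif --meas sample-ave.fif \
+        --subject sample --spm --smooth 5 --view lat \
+        --tmin 0 --tmax 200 --tstep 5 --mov sample-dspm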
+
+.. _CBBHHCEF:
+
+Label processing
+----------------
+
+``--label <*name*>``
+
+    Specifies a label file to process. For each label file, the values
+    of the computed estimates are listed in text files. The label files
+    are produced by tksurfer or mne_analyze and
+    specify regions of interests (ROIs). A label file name should end
+    with ``-lh.label`` for left-hemisphere ROIs and with ``-rh.label`` for
+    right-hemisphere ones. The corresponding output files are tagged
+    with ``-lh-`` <*data type*> ``.amp`` and ``-rh-`` <*data type*> ``.amp``, respectively. <*data type*> equals ``mne`` for
+    expected current data and ``spm`` for
+    dSPM data. Each line of the output file contains the waveform of
+    the output quantity at one of the source locations falling inside
+    the ROI. For more information about the label output formats, see :ref:`CACJJGFA`.
+
+``--labelcoords``
+
+    Include coordinates of the vertices in the output. The coordinates will
+    be listed in millimeters in the coordinate system which was specified
+    for the forward model computations. This option cannot be used with
+    stc input files (``--stcin`` ) because the stc files do
+    not contain the coordinates of the vertices.
+
+``--labelverts``
+
+    Include vertex numbers in the output. The numbers refer to the complete
+    triangulation of the corresponding surface and are zero based. The
+    vertex numbers are by default on the first row or first column of the
+    output file depending on whether or not the ``--labeltimebytime`` option
+    is present.
+
+``--labeltimebytime``
+
+    Output the label data time by time instead of the default vertex-by-vertex
+    output.
+
+``--labeltag <*tag*>``
+
+    End the output files with the specified tag. By default, the output files
+    will end with ``-mne.amp`` or ``-spm.amp`` depending
+    on whether MNE or one of the noise-normalized estimates (dSPM or sLORETA)
+    was selected.
+
+``--labeloutdir <*directory*>``
+
+    Specifies the directory where the output files will be located.
+    By default, they will be in the current working directory.
+
+``--labelcomments``
+
+    Include comments in the output files. The comment lines begin with the
+    percent sign to make the files compatible with Matlab.
+
+``--scaleby <*factor*>``
+
+    By default, the current values output to the files will be in the
+    actual physical units (Am). This option allows scaling of the current
+    values to other units. mne_analyze typically
+    uses 1e10 to bring the numbers to a human-friendly scale.
+
+Using stc file input
+--------------------
+
+The ``--stcin`` option allows input of stc files.
+This feature has several uses:
+
+- QuickTime movies can be produced from
+  existing stc files without having to resort to EasyMeg.
+
+- Graphics snapshots can be produced from existing stc files.
+
+- Existing stc files can be temporally resampled with help of
+  the ``--tmin`` , ``--tmax`` , ``--tstep`` ,
+  and ``--integ`` options.
+
+- Existing stc files can be morphed to another cortical surface
+  by specifying the ``--morph`` option.
+
+- Time courses can be extracted and stored into text files with
+  help of the ``--label`` options, see above.
+
+
+.. _mne_make_source_space:
+
+mne_make_source_space
+=====================
+
+``--subject <name>``
+
+    Name of the subject.
+
+``--morph <name>``
+
+    Name of the subject to morph the source space to.
+
+``--spacing <dist>``
+
+    Approximate source space spacing in mm.
+
+``--ico <grade>``
+
+    Use the subdivided icosahedron or octahedron in downsampling instead of the ``--spacing`` option.
+
+``--oct <grade>``
+
+    Same as ``--ico`` with a negated grade: ``--oct <grade>`` is equivalent to ``--ico -<grade>``.
+
+``--surf <names>``
+
+    Surface file names (separated by colons).
+
+``--src <name>``
+
+    Name of the output file.
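+
+A brief hypothetical example, creating a source space based on a five-times
+subdivided icosahedron::
+
+    mne_make_source_space --subject sample --ico 5 --src sample-ico-5-src.fif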
+
+
+.. _mne_process_raw:
+
+mne_process_raw
+===============
+
+``--cd <*dir*>``
+
+    Change to this directory before starting.
+
+``--raw <*name*>``
+
+    Specifies the raw data file to be opened. This option is required.
+
+``--grad <*number*>``
+
+    Apply software gradient compensation of the given order to the data loaded
+    with the ``--raw`` option. This option is effective only
+    for data acquired with the CTF and 4D Magnes MEG systems. If orders
+    different from zero are requested for Neuromag data, an error message appears
+    and data are not loaded. Any compensation already existing in the
+    file can be undone or changed to another order by using an appropriate ``--grad`` option.
+    Possible orders are 0 (No compensation), 1 - 3 (CTF data), and 101
+    (Magnes data). The same compensation will be applied to all loaded data
+    files.
+
+``--filtersize <*size*>``
+
+    Adjust the length of the FFT to be applied in filtering. The number will
+    be rounded up to the next power of two. If the size is :math:`N`,
+    the corresponding length of time is :math:`N/f_s`,
+    where :math:`f_s` is the sampling frequency
+    of your data. The filtering procedure includes overlapping tapers
+    of length :math:`N/2` so that the total FFT
+    length will actually be :math:`2N`. This
+    value cannot be changed after the program has been started.
+
+``--highpass <*value/Hz*>``
+
+    Highpass filter frequency limit. If this is too low with respect
+    to the selected FFT length and the data sampling frequency, the data will not be highpass filtered. It
+    is best to experiment with the interactive version to find the lowest applicable
+    filter for your data. This value can be adjusted in the interactive
+    version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+``--highpassw <*value/Hz*>``
+
+    The width of the transition band of the highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`. This
+    value cannot be adjusted in the interactive version of the program.
+
+``--lowpass <*value/Hz*>``
+
+    Lowpass filter frequency limit. This value can be adjusted in the interactive
+    version of the program. The default is 40 Hz.
+
+``--lowpassw <*value/Hz*>``
+
+    The width of the transition band of the lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+``--eoghighpass <*value/Hz*>``
+
+    Highpass filter frequency limit for EOG. If this is too low with respect
+    to the selected FFT length and the data sampling frequency, the data will not be highpass filtered.
+    It is best to experiment with the interactive version to find the
+    lowest applicable filter for your data. This value can be adjusted in
+    the interactive version of the program. The default is 0, *i.e.*,
+    no highpass filter apart from that used during the acquisition will
+    be in effect.
+
+``--eoghighpassw <*value/Hz*>``
+
+    The width of the transition band of the EOG highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`f_s / (2N)`.
+    This value cannot be adjusted in the interactive version of the
+    program.
+
+``--eoglowpass <*value/Hz*>``
+
+    Lowpass filter frequency limit for EOG. This value can be adjusted in
+    the interactive version of the program. The default is 40 Hz.
+
+``--eoglowpassw <*value/Hz*>``
+
+    The width of the transition band of the EOG lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+``--filteroff``
+
+    Do not filter the data. This initial value can be changed in the
+    interactive version of the program.
+
+``--digtrig <*name*>``
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+``--digtrigmask <*number*>``
+
+    Mask to be applied to the trigger channel values before considering them.
+    This option is useful if one wants to set some bits in a don't care
+    state. For example, some finger response pads keep the trigger lines
+    high if not in use, *i.e.*, a finger is not in
+    place. Yet, it is convenient to keep these devices permanently connected
+    to the acquisition system. The number can be given in decimal or
+    hexadecimal format (beginning with 0x or 0X). For example, the value
+    255 (0xFF) means that only the lowest order byte (usually trigger
+    lines 1 - 8 or bits 0 - 7) will be considered.
+
+``--proj <*name*>``
+
+    Specify the name of the file containing a signal-space
+    projection (SSP) operator. If ``--proj`` options are present
+    the data file is not consulted for an SSP operator. The operator
+    corresponding to average EEG reference is always added if EEG data
+    are present.
+
+``--projon``
+
+    Activate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
+    be present on the mne_process_raw command line.
+
+``--projoff``
+
+    Deactivate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
+    be present on the mne_process_raw command line.
+
+``--makeproj``
+
+    Estimate the noise subspace from the data and create a new signal-space
+    projection operator instead of using one attached to the data file
+    or those specified with the ``--proj`` option. The following
+    eight options define the parameters of the noise subspace estimation. More
+    information on the signal-space projection can be found in :ref:`CACCHABI`.
+
+``--projevent <*no*>``
+
+    Specifies the events which identify the time points of interest
+    for projector calculation. When this option is present, ``--projtmin`` and ``--projtmax`` are
+    relative to the time point of the event rather than the whole raw
+    data file.
+
+``--projtmin <*time/s*>``
+
+    Specify the beginning time for the calculation of the covariance matrix
+    which serves as the basis for the new SSP operator. This option
+    is required with ``--projevent`` and defaults to the beginning
+    of the raw data file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
+    are present.
+
+``--projtmax <*time/s*>``
+
+    Specify the ending time for the calculation of the covariance matrix which
+    serves as the basis for the new SSP operator. This option is required
+    with ``--projevent`` and defaults to the end of the raw data
+    file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
+    are present.
+
+``--projngrad <*number*>``
+
+    Number of SSP components to include for planar gradiometers (default
+    = 5). This value is system dependent. For example, in a well-shielded
+    quiet environment, no planar gradiometer projections are usually
+    needed.
+
+``--projnmag <*number*>``
+
+    Number of SSP components to include for magnetometers / axial gradiometers
+    (default = 8). This value is system dependent. For example, in a
+    well-shielded quiet environment, 3 - 4 components are needed
+    while in a noisy environment with light shielding even more than
+    8 components may be necessary.
+
+``--projgradrej <*value/ fT/cm*>``
+
+    Rejection limit for planar gradiometers in the estimation of the covariance
+    matrix from which the new SSP operator is derived. The default
+    value is 2000 fT/cm. Again, this value is system dependent.
+
+``--projmagrej <*value/ fT*>``
+
+    Rejection limit for magnetometers and axial gradiometers in the estimation of the covariance
+    matrix from which the new SSP operator is derived. The default value
+    is 3000 fT. Again, this value is system dependent.
+
+``--saveprojtag <*tag*>``
+
+    This option defines the names of files to hold the SSP operator.
+    If this option is present the ``--makeproj`` option is
+    implied. The SSP operator file name is formed by removing the trailing ``.fif`` or ``_raw.fif`` from
+    the raw data file name and appending <*tag*> ``.fif``
+    to this stem. The recommended value for <*tag*> is ``-proj`` .
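+
+As an illustrative sketch, a new SSP operator could be estimated from an
+empty-room recording as follows (the file name is hypothetical)::
+
+    mne_process_raw --raw empty_room_raw.fif --projoff \
+        --makeproj --projnmag 8 --projngrad 5 --saveprojtag -proj
+
+Following the naming rule above, the operator would be saved in
+``empty_room-proj.fif`` .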
+
+``--saveprojaug``
+
+    Specify this option if you want to use the projection operator file output
+    in the Elekta-Neuromag Signal processor (graph) software.
+
+``--eventsout <*name*>``
+
+    List the digital trigger channel events to the specified file. By default,
+    only transitions from zero to a non-zero value are listed. If multiple
+    raw data files are specified, an equal number of ``--eventsout`` options
+    should be present. If the file name ends with .fif, the output will
+    be in fif format, otherwise a text event file will be output.
+
+``--allevents``
+
+    List all transitions to file specified with the ``--eventsout`` option.
+
+``--events <*name*>``
+
+    Specifies the name of a fif or text format event file (see :ref:`CACBCEGC`) to be associated with a raw data file to be
+    processed. If multiple raw data files are specified, the number
+    of ``--events`` options can be smaller or equal to the
+    number of raw data files. If it is equal, the event filenames will
+    be associated with the raw data files in the order given. If it
+    is smaller, the remaining raw data files for which an event file
+    is not specified will *not* have an event file associated
+    with them. The event file format is recognized from the file name:
+    if it ends with ``.fif`` , the file is assumed to be in
+    fif format, otherwise a text file is expected.
+
+``--ave <*name*>``
+
+    Specifies the name of an off-line averaging description file. For details
+    of the format of this file, please consult :ref:`CACBBDGC`.
+    If multiple raw data files are specified, the number of ``--ave`` options
+    can be smaller or equal to the number of raw data files. If it is
+    equal, the averaging description file names will be associated with
+    the raw data files in the order given. If it is smaller, the last
+    description file will be used for the remaining raw data files.
+
+``--saveavetag <*tag*>``
+
+    If this option is present and averaging is invoked with the ``--ave`` option,
+    the outfile and logfile options in the averaging description file
+    are ignored. Instead, trailing ``.fif`` or ``_raw.fif`` is
+    removed from the raw data file name and <*tag*> ``.fif`` or <*tag*> ``.log`` is appended
+    to create the output and log file names, respectively.
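+
+For example, averaging with automatically derived output names might look
+like this (file names hypothetical)::
+
+    mne_process_raw --raw run1_raw.fif --projon \
+        --ave audvis.ave --saveavetag -ave
+
+Here the average and log would be written to ``run1-ave.fif`` and
+``run1-ave.log`` .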
+
+``--gave <*name*>``
+
+    If multiple raw data files are specified as input and averaging
+    is requested, the grand average over all data files will be saved
+    to <*name*> .
+
+``--cov <*name*>``
+
+    Specify the name of a description file for covariance matrix estimation. For
+    details of the format of this file, please see :ref:`CACEBACG`.
+    If multiple raw data files are specified, the number of ``--cov`` options can
+    be smaller or equal to the number of raw data files. If it is equal, the
+    covariance description file names will be associated with the raw data
+    files in the order given. If it is smaller, the last description
+    file will be used for the remaining raw data files.
+
+``--savecovtag <*tag*>``
+
+    If this option is present and covariance matrix estimation is invoked with
+    the ``--cov`` option, the outfile and logfile options in
+    the covariance estimation description file are ignored. Instead,
+    trailing ``.fif`` or ``_raw.fif`` is removed from
+    the raw data file name and <*tag*> .fif or <*tag*> .log
+    is appended to create the output and log file names, respectively.
+    For compatibility with other MNE software scripts, ``--savecovtag -cov`` is recommended.
+
+``--savehere``
+
+    If the ``--saveavetag`` and ``--savecovtag`` options
+    are used to generate the output file names, the resulting files
+    will go to the same directory as raw data by default. With this
+    option the output files will be generated in the current working
+    directory instead.
+
+``--gcov <*name*>``
+
+    If multiple raw data files are specified as input and covariance matrix estimation
+    is requested, the grand average over all data files will be saved
+    to <*name*> . The details of
+    the covariance matrix estimation are given in :ref:`CACHAAEG`.
+
+``--save <*name*>``
+
+    Save a filtered and optionally down-sampled version of the data
+    file to <*name*> . If multiple
+    raw data files are specified, an equal number of ``--save`` options
+    should be present. If <*name*> ends
+    with ``.fif`` or ``_raw.fif`` , these endings are
+    deleted. After these modifications, ``_raw.fif`` is inserted
+    after the remaining part of the file name. If the file is split
+    into multiple parts (see ``--split`` option below), the
+    additional parts will be called <*name*> ``-`` <*number*> ``_raw.fif`` .
+
+``--split <*size/MB*>``
+
+    Specifies the maximum size of the raw data files saved with the ``--save`` option.
+    By default, the output is split into files which are just below
+    2 GB so that the fif file maximum size is not exceeded.
+
+``--anon``
+
+    Do not include any subject information in the output files created with
+    the ``--save`` option.
+
+``--decim <*number*>``
+
+    The data are decimated by this factor before saving to the file
+    specified with the ``--save`` option. For decimation to
+    succeed, the data must be lowpass filtered to less than a third of
+    the sampling frequency effective after decimation.
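+
+A sketch of a filtering and downsampling run obeying this rule (the raw file
+name is hypothetical; with 600 Hz data decimated by 4, the 40 Hz lowpass is
+safely below a third of the effective 150 Hz sampling frequency)::
+
+    mne_process_raw --raw run1_raw.fif --projoff \
+        --lowpass 40 --decim 4 --save run1_filt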
+
+
+.. _mne_redo_file:
+
+mne_redo_file
+=============
+
+Usage: ``mne_redo_file <file-to-redo>``
+
+
+.. _mne_redo_file_nocwd:
+
+mne_redo_file_nocwd
+===================
+
+Usage: ``mne_redo_file_nocwd <file-to-redo>``
+
+
+.. _mne_setup_forward_model:
+
+mne_setup_forward_model
+=======================
+
+``--subject <*subject*>``
+
+    Defines the name of the subject. This can be also accomplished
+    by setting the SUBJECT environment variable.
+
+``--surf``
+
+    Use the FreeSurfer surface files instead of the default ASCII triangulation
+    files. Please consult :ref:`BABDBBFC` for the standard file
+    naming scheme.
+
+``--noswap``
+
+    Traditionally, the vertices of the triangles in 'tri' files
+    have been ordered so that, seen from the outside of the triangulation,
+    the vertices are ordered in clockwise fashion. The fif files, however,
+    employ the more standard convention with the vertices ordered counterclockwise.
+    Therefore, mne_setup_forward_model by
+    default reverses the vertex ordering before writing the fif file.
+    If, for some reason, you have counterclockwise-ordered tri files
+    available this behavior can be turned off by defining ``--noswap`` .
+    When the fif file is created, the vertex ordering is checked and
+    the process is aborted if it is incorrect after taking into account
+    the state of the swapping. Should this happen, try to run mne_setup_forward_model again including
+    the ``--noswap`` flag. In particular, if you employ the seglab software
+    to create the triangulations (see :ref:`create_bem_model`), the ``--noswap`` flag
+    is required. This option is ignored if ``--surf`` is specified.
+
+``--ico <*number*>``
+
+    This option is relevant (and required) only with the ``--surf`` option and
+    if the surface files have been produced by the watershed algorithm.
+    The watershed triangulations are isomorphic with an icosahedron,
+    which has been recursively subdivided six times to yield 20480 triangles.
+    However, this number of triangles results in a long computation
+    time even in a workstation with generous amounts of memory. Therefore,
+    the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
+    in 1280 triangles. The recommended choice is ``--ico 4`` .
+
+``--homog``
+
+    Use a single compartment model (brain only) instead of a three-layer one
+    (scalp, skull, and brain). Only the ``inner_skull.tri`` triangulation
+    is required. This model is usually sufficient for MEG but invalid
+    for EEG. If you are employing MEG data only, this option is recommended
+    because of faster computation times. If this flag is specified,
+    the options ``--brainc`` , ``--skullc`` , and ``--scalpc`` are irrelevant.
+
+``--brainc <*conductivity/ S/m*>``
+
+    Defines the brain compartment conductivity. The default value is 0.3 S/m.
+
+``--skullc <*conductivity/ S/m*>``
+
+    Defines the skull compartment conductivity. The default value is 0.006 S/m
+    corresponding to a conductivity ratio 1/50 between the brain and
+    skull compartments.
+
+``--scalpc <*conductivity/ S/m*>``
+
+    Defines the scalp compartment conductivity. The default value is 0.3 S/m.
+
+``--innershift <*value/mm*>``
+
+    Shift the inner skull surface outwards along the vertex normal directions
+    by this amount.
+
+``--outershift <*value/mm*>``
+
+    Shift the outer skull surface outwards along the vertex normal directions
+    by this amount.
+
+``--scalpshift <*value/mm*>``
+
+    Shift the scalp surface outwards along the vertex normal directions by
+    this amount.
+
+``--nosol``
+
+    Omit the BEM model geometry dependent data preparation step. This
+    can be done later by running mne_setup_forward_model without the ``--nosol`` option.
+
+``--model <*name*>``
+
+    Name for the BEM model geometry file. The model will be created into
+    the directory ``bem`` as <*name*> ``-bem.fif`` . If
+    this option is missing, standard model names will be used (see below).
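+
+As a hedged example, a three-layer model could be set up from
+watershed-produced FreeSurfer surfaces with the recommended decimation::
+
+    mne_setup_forward_model --subject sample --surf --ico 4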
+
+
+.. _mne_setup_mri:
+
+mne_setup_mri
+=============
+
+This command sets up the directories ``subjects/$SUBJECT/mri/T1-neuromag`` and
+``subjects/$SUBJECT/mri/brain-neuromag`` .
+
+
+.. _mne_setup_source_space:
+
+mne_setup_source_space
+======================
+
+``--subject <*name*>``
+
+    Name of the subject in SUBJECTS_DIR. In the absence of this option,
+    the SUBJECT environment variable will be consulted. If it is not
+    defined, mne_setup_source_space exits
+    with an error.
+
+``--morph <*name*>``
+
+    Name of a subject in SUBJECTS_DIR. If this option is present, the source
+    space will be first constructed for the subject defined by the --subject
+    option or the SUBJECT environment variable and then morphed to this
+    subject. This option is useful if you want to create a source spaces
+    for several subjects and want to directly compare the data across
+    subjects at the source space vertices without any morphing procedure
+    afterwards. The drawback of this approach is that the spacing between
+    source locations in the "morph" subject is not going
+    to be as uniform as it would be without morphing.
+
+``--surf <*name1*>: <*name2*>:...``
+
+    FreeSurfer surface file names specifying the source surfaces, separated
+    by colons.
+
+``--spacing <*spacing/mm*>``
+
+    Specifies the approximate grid spacing of the source space in mm.
+
+``--ico <*number*>``
+
+    Instead of using the traditional method for cortical surface decimation
+    it is possible to create the source space using the topology of
+    a recursively subdivided icosahedron ( <*number*> > 0)
+    or an octahedron ( <*number*>  < 0).
+    This method uses the cortical surface inflated to a sphere as a
+    tool to find the appropriate vertices for the source space. The
+    benefit of the ``--ico`` option is that the source space will have triangulation
+    information between the decimated vertices included, which some
+    future versions of MNE software may be able to utilize. The number
+    of triangles increases by a factor of four in each subdivision,
+    starting from 20 triangles in an icosahedron and 8 triangles in
+    an octahedron. Since the number of vertices on a closed surface
+    is :math:`n_{vert} = (n_{tri} + 4) / 2`, the number of vertices in
+    the *k* th subdivision of an icosahedron and an
+    octahedron are :math:`10 \cdot 4^k + 2` and :math:`4^{k + 1} + 2`,
+    respectively. The recommended values for <*number*> and
+    the corresponding number of source space locations are listed in Table 3.1.
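+
+    For example, the icosahedral subdivision with :math:`k = 5` yields
+    :math:`10 \cdot 4^5 + 2 = 10242` source space vertices per hemisphere.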
+
+``--all``
+
+    Include all nodes to the output. The active dipole nodes are identified
+    in the fif file by a separate tag. If tri files were used as input
+    the output file will also contain information about the surface
+    triangulation. This option is always recommended to include complete
+    information.
+
+``--src <*name*>``
+
+    Output file name. Use a name of the form <*dir*>/<*name*>-src.fif .
+
+.. note:: If both ``--ico`` and ``--spacing`` options are present, the one given later on the command line takes precedence.
+
+.. note:: Due to the differences between the FreeSurfer and MNE libraries, the number of source space points generated with the ``--spacing`` option may be different between the current version of MNE and versions 2.5 or earlier (using the ``--spacing`` option to mne_setup_source_space ) if the FreeSurfer surfaces employ the (old) quadrangle format or if there are topological defects on the surfaces. All new FreeSurfer surfaces are specified as triangular tessellations a [...]
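+
+A short hypothetical example, using a six-times subdivided octahedron and
+including the complete triangulation information::
+
+    mne_setup_source_space --subject sample --ico -6 --all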
+
+
+.. _mne_show_environment:
+
+mne_show_environment
+====================
+
+Usage: ``mne_show_environment <files>``
+
+
+Utility command-line arguments
+##############################
+
+.. _mne_add_patch_info:
+
+mne_add_patch_info
+==================
+
+Purpose
+-------
+
+The utility mne_add_patch_info uses
+the detailed cortical surface geometry information to add data about
+cortical patches corresponding to each source space point. A new
+copy of the source space(s) included in the input file is created
+with the patch information included. In addition to the patch information, mne_add_patch_info can
+optionally calculate distances, along the cortical surface, between
+the vertices selected to the source space.
+
+.. note:: Depending on the speed of your computer and the options selected, mne_add_patch_info takes 5 - 30 minutes to run.
+
+.. _CJAGCDCC:
+
+Command line options
+--------------------
+
+mne_add_patch_info accepts
+the following command-line options:
+
+``--verbose``
+
+    Provide verbose output during the calculations.
+
+``--dist  <*dist/mm*>``
+
+    Invokes the calculation of distances between vertices included in
+    the source space along the cortical surface. Only pairs whose distance in
+    the three-dimensional volume is less than the specified distance are
+    considered. For details, see :ref:`CJAIFJDD`, below.
+
+``--src  <*name*>``
+
+    The input source space file. The source space files usually end
+    with ``-src.fif`` .
+
+``--srcp  <*name*>``
+
+    The output source space file which will contain the patch information.
+    If the file exists, it will be overwritten without asking for permission.
+    A recommended naming convention is to add the letter ``p`` after the
+    source spacing included in the file name. For example, if the input
+    file is ``mh-7-src.fif`` , a recommended output file name
+    is ``mh-7p-src.fif`` .
+
+``--w  <*name*>``
+
+    Name of a w file, which will contain the patch area information. Two
+    files will be created:  <*name*> ``-lh.w`` and  <*name*> ``-rh.w`` .
+    The numbers in the files are patch areas in :math:`\text{mm}^2`.
+    The source space vertices are marked with value 150.
+
+``--labeldir  <*directory*>``
+
+    Create a label file corresponding to each of the patches in the
+    given directory. The directory must be created before running mne_add_patch_info .
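+
+Following the naming convention suggested above, a hypothetical run that also
+computes distances within a 7 mm neighborhood would be::
+
+    mne_add_patch_info --dist 7 --src mh-7-src.fif --srcp mh-7p-src.fif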
+
+.. _CJAIFJDD:
+
+Computational details
+---------------------
+
+By default, mne_add_patch_info creates
+a copy of the source space(s) with the following additional information
+for each vertex in the original dense triangulation of the cortex:
+
+- The number of the closest active source
+  space vertex and
+
+- The distance to this vertex.
+
+This information can be used to determine, *e.g.*,
+the sizes of the patches, their average normals, and the standard
+deviation of the normal directions. This information is also returned
+by the mne_read_source_space Matlab function as described in Table 10.28.
+
+The ``--dist`` option to mne_add_patch_info invokes
+the calculation of inter-vertex distances. These distances are computed
+    along the cortical surface (usually the white matter) on which
+the source space vertices are located.
+
+Since the calculation of all possible distances would take
+a very long time, the distance given with the ``--dist`` option allows
+restriction to the neighborhood of each source space vertex. This
+neighborhood is defined as the sphere around each source space vertex,
+with radius given by the ``--dist`` option. Because the distance calculation
+is done along the folded cortical surface whose details are given
+by the dense triangulation of the cortical surface produced by FreeSurfer,
+    some of the distances computed will be larger than the value given
+    with ``--dist``.
+
+
+.. _mne_add_to_meas_info:
+
+mne_add_to_meas_info
+====================
+
+Add new data to meas info.
+
+``--add <name>``
+
+    The file to add.
+
+``--dest <name>``
+
+    The destination file.
+
+
+.. _mne_add_triggers:
+
+mne_add_triggers
+================
+
+Purpose
+-------
+
+The utility mne_add_triggers modifies
+the digital trigger channel (STI 014) in raw data files
+to include additional transitions. Since the raw data file is modified,
+it is possible to make irreversible changes. Use this utility with
+caution. It is recommended that you never run mne_add_triggers on
+an original raw data file.
+
+Command line options
+--------------------
+
+mne_add_triggers accepts
+the following command-line options:
+
+``--raw  <*name*>``
+
+    Specifies the raw data file to be modified.
+
+``--trg  <*name*>``
+
+    Specifies the trigger line modification list. This text file should
+    contain two entries per line: the sample number and the trigger
+    number to be added into the file. The number of the first sample
+    in the file is zero. It is recommended that trigger numbers whose
+    binary equivalent has lower eight bits equal to zero are used to
+    avoid conflicts with the ordinary triggers occurring in the file.
+
+``--delete``
+
+    Delete the triggers defined by the trigger file instead of adding
+    them. This enables changing the file to its original state, provided
+    that the trigger file is preserved.
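+
+As an illustration, if a hypothetical file ``new_triggers.txt`` contains the
+line ``1024 512``, the following call adds trigger 512 at sample 1024::
+
+    mne_add_triggers --raw run1_raw.fif --trg new_triggers.txt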
+
+.. note:: Since :ref:`mne_browse_raw` and :ref:`mne_process_raw` can employ an event file which effectively adds new trigger instants, mne_add_triggers is for the most part obsolete, but it has been retained in the MNE software suite for backward compatibility.
+
+
+
+.. _mne_annot2labels:
+
+mne_annot2labels
+================
+
+The utility mne_annot2labels converts
+cortical parcellation data into a set of labels. The parcellation
+data are read from the directory ``$SUBJECTS_DIR/$SUBJECT/label`` and
+the resulting labels are written to the current directory. mne_annot2labels requires
+that the environment variable ``$SUBJECTS_DIR`` is set.
+The command line options for mne_annot2labels are:
+
+``--subject  <*name*>``
+
+    Specifies the name of the subject. If this option is not present
+    the ``$SUBJECT`` environment variable is consulted. If
+    the subject name cannot be determined, the program quits.
+
+``--parc  <*name*>``
+
+    Specifies the parcellation name to convert. The corresponding parcellation
+    file names will be ``$SUBJECTS_DIR/$SUBJECT/label/``  <*hemi*> ``h.``  <*name*> ``.annot`` where  <*hemi*> is ``l`` or ``r`` for the
+    left and right hemisphere, respectively.
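+
+For example, the standard FreeSurfer ``aparc`` parcellation of a subject
+called ``sample`` could be converted into labels in the current directory
+with::
+
+    mne_annot2labels --subject sample --parc aparc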
+
+
+.. _mne_anonymize:
+
+mne_anonymize
+=============
+
+Depending on the settings during acquisition in the Elekta-Neuromag EEG/MEG
+systems, the data files may contain subject identifying information
+in unencrypted form. The utility mne_anonymize was
+written to clear tags containing such information from a fif file.
+Specifically, this utility removes the following tags from the fif
+file:
+
+.. _CHDEHBCG:
+
+.. table:: Tags cleared by mne_anonymize .
+
+    ========================  ==============================================
+    Tag                       Description
+    ========================  ==============================================
+    FIFF_SUBJ_FIRST_NAME      First name of the subject
+    FIFF_SUBJ_MIDDLE_NAME     Middle name of the subject
+    FIFF_SUBJ_LAST_NAME       Last name of the subject
+    FIFF_SUBJ_BIRTH_DAY       Birthday of the subject (Julian day number)
+    FIFF_SUBJ_SEX             The sex of the subject
+    FIFF_SUBJ_HAND            Handedness of the subject
+    FIFF_SUBJ_WEIGHT          Weight of the subject in kg
+    FIFF_SUBJ_HEIGHT          Height of the subject in m
+    FIFF_SUBJ_COMMENT         Comment about the subject
+    ========================  ==============================================
+
+.. note:: mne_anonymize normally keeps the FIFF_SUBJ_HIS_ID tag which can be used to identify the subjects uniquely after the information listed in :ref:`CHDEHBCG` has been removed. If the ``--his`` option is specified on the command line, the FIFF_SUBJ_HIS_ID tag will be removed as well. The data of the tags listed in :ref:`CHDEHBCG` and the optional FIFF_SUBJ_HIS_ID tag are overwritten with zeros and the space claimed by omitting these tags is added to the free sp [...]
+
+mne_anonymize recognizes
+the following command-line options:
+
+``--his``
+
+    Remove the FIFF_SUBJ_HIS_ID tag as well, see above.
+
+``--file  <*name*>``
+
+    Specifies the name of the file to be modified.
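+
+For example, to clear the tags listed above, including FIFF_SUBJ_HIS_ID
+(the file name is hypothetical)::
+
+    mne_anonymize --his --file subject1_raw.fif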
+
+.. note:: You need write permission to the file to be processed.
+
+
+.. _mne_average_forward_solutions:
+
+mne_average_forward_solutions
+=============================
+
+``--fwd <*name*> :[ <*weight*> ]``
+
+    Specifies a forward solution to include. If no weight is specified,
+    1.0 is assumed. In the averaging process the weights are divided
+    by their sum. For example, if two forward solutions are averaged
+    and their specified weights are 2 and 3, the average is formed with
+    a weight of 2/5 for the first solution and 3/5 for the second one.
+
+``--out <*name*>``
+
+    Specifies the output file which will contain the averaged forward solution.
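+
+The weighting example above would thus be written as (file names
+hypothetical)::
+
+    mne_average_forward_solutions --fwd first-fwd.fif:2 \
+        --fwd second-fwd.fif:3 --out average-fwd.fif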
+
+
+.. _mne_brain_vision2fiff:
+
+mne_brain_vision2fiff
+=====================
+
+The utility mne_brain_vision2fiff was
+created to import BrainVision EEG data. This utility also helps
+to import the eXimia (Nexstim) TMS-compatible EEG system data to
+the MNE software. The utility uses an optional fif file containing
+the head digitization data to allow source modeling. The MNE Matlab
+toolbox contains the function fiff_write_dig_file to
+write a digitization file based on digitization data available in
+another format, see :ref:`ch_matlab`.
+
+.. note::
+
+    mne_brain_vision2fiff reads events from the ``vmrk`` file referenced in the
+    ``vhdr`` file, but it only includes events whose "Type" is ``Stimulus`` and
+    whose "description" is given by ``S<number>``. All other events are ignored.
+
+
+The command-line options of mne_brain_vision2fiff are:
+
+``--header <*name*>``
+
+    The name of the BrainVision header file. The extension of this file
+    is ``vhdr`` . The header file typically refers to a marker
+    file (``vmrk`` ) which is automatically processed and a
+    digital trigger channel (STI 014) is formed from the marker information.
+    The ``vmrk`` file is ignored if the ``--eximia`` option
+    is present.
+
+``--dig <*name*>``
+
+    The name of the fif file containing the digitization data.
+
+``--orignames``
+
+    Use the original EEG channel labels. If this option is absent the EEG
+    channels will be automatically renamed to EEG 001, EEG 002, *etc.*
+
+``--eximia``
+
+    Interpret this as an eXimia data file. The first three channels
+    will be thresholded and interpreted as trigger channels. The composite
+    digital trigger channel will be composed in the same way as in the
+    :ref:`mne_kit2fiff` utility. In addition, the fourth channel
+    will be assigned as an EOG channel. This option is normally used
+    by the :ref:`mne_eximia2fiff` script.
+
+``--split <*size/MB*>``
+
+    Split the output data into several files which are no more than <*size*> MB.
+    By default, the output is split into files which are just below
+    2 GB so that the fif file maximum size is not exceeded.
+
+``--out <*filename*>``
+
+    Specifies the name of the output fif format data file. If <*filename*> ends
+    with ``.fif`` or ``_raw.fif`` , these endings are
+    deleted. After these modifications, ``_raw.fif`` is inserted
+    after the remaining part of the file name. If the file is split
+    into multiple parts, the additional parts will be called
+    <*name*> ``-`` <*number*> ``_raw.fif`` .
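+
+A minimal sketch of an import including digitization data (file names
+hypothetical)::
+
+    mne_brain_vision2fiff --header recording.vhdr \
+        --dig digitization.fif --out recording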
+
+
+.. _mne_change_baselines:
+
+mne_change_baselines
+====================
+
+The utility mne_change_baselines computes
+baseline values and applies them to an evoked-response data file.
+The command-line options are:
+
+``--in  <*name*>``
+
+    Specifies the input data file.
+
+``--set  <*number*>``
+
+    The data set number to compute baselines from or to apply baselines
+    to. If this option is omitted, all average data sets in the input file
+    are processed.
+
+``--out  <*name*>``
+
+    The output file.
+
+``--baselines  <*name*>``
+
+    Specifies a text file which contains the baseline values to be applied. Each
+    line should contain a channel name, colon, and the baseline value
+    given in 'native' units (T/m, T, or V). If this
+    option is encountered, the limits specified by any previous ``--bmin``
+    and ``--bmax`` options will have no effect.
+
+``--list  <*name*>``
+
+    Specifies a text file to contain the baseline values. Listing is
+    provided only if a specific data set is selected with the ``--set`` option.
+
+``--bmin  <*value/ms*>``
+
+    Lower limit of the baseline. Effective only if ``--baselines`` option is
+    not present. Both ``--bmin`` and ``--bmax`` must
+    be present to compute the baseline values. If either ``--bmin`` or ``--bmax`` is
+    encountered, previous ``--baselines`` option will be ignored.
+
+``--bmax  <*value/ms*>``
+
+    Upper limit of the baseline.
+
+
+.. _mne_change_nave:
+
+mne_change_nave
+===============
+
+Usage: ``mne_change_nave --nave <number> <meas file> ...``
+
+
+.. _mne_check_eeg_locations:
+
+mne_check_eeg_locations
+=======================
+
+Some versions of the Neuromag acquisition software did not
+copy the EEG channel location information properly from the Polhemus
+digitizer information data block to the EEG channel information
+records if the number of EEG channels exceeded 60. The purpose of mne_check_eeg_locations is
+to detect this problem and fix it, if requested. The command-line
+options are:
+
+``--file  <*name*>``
+
+    Specify the measurement data file to be checked or modified.
+
+``--dig  <*name*>``
+
+    Name of the file containing the Polhemus digitizer information. Default
+    is the data file name.
+
+``--fix``
+
+    By default mne_check_eeg_locations only
+    checks for missing EEG locations (locations close to the origin).
+    With ``--fix``, mne_check_eeg_locations reads
+    the Polhemus data from the specified file and copies the EEG electrode
+    location information to the channel information records in the measurement
+    file. There is no harm in running mne_check_eeg_locations on
+    a data file even if the EEG channel locations were correct in the
+    first place.
+
+
+.. _mne_check_surface:
+
+mne_check_surface
+=================
+This program just reads a surface file to check whether it is valid.
+
+``--surf <name>``
+
+    The input file (FreeSurfer surface format).
+
+``--bem <name>``
+
+    The input file (a BEM fif file).
+
+``--id <id>``
+
+    Surface id to list (default: 4):
+
+    * 4 for outer skin (scalp) surface
+    * 3 for outer skull surface
+    * 1 for inner skull surface
+
+
+``--checkmore``
+
+    Do more thorough testing.
+
+
+.. _mne_collect_transforms:
+
+mne_collect_transforms
+======================
+
+The utility mne_collect_transforms collects
+coordinate transformations from various sources and saves
+them into a single fif file. The coordinate transformations used
+by the MNE software are summarized in Figure 5.1. The output
+of mne_collect_transforms may
+include all transforms referred to therein except for the sensor
+coordinate system transformations :math:`T_{s_1} \dotso T_{s_n}`.
+The command-line options are:
+
+``--meas <*name*>``
+
+    Specifies a measurement data file which provides :math:`T_1`.
+    A forward solution or an inverse operator file can also be specified
+    as implied by Table 5.1.
+
+``--mri <*name*>``
+
+    Specifies an MRI description or a standalone coordinate transformation
+    file produced by mne_analyze which
+    provides :math:`T_2`. If the ``--mgh`` option
+    is not present mne_collect_transforms also
+    tries to find :math:`T_3`, :math:`T_4`, :math:`T_-`,
+    and :math:`T_+` from this file.
+
+``--mgh <*name*>``
+
+    An MRI volume file in mgh or mgz format.
+    This file provides :math:`T_3`. The transformation :math:`T_4` will
+    be read from the talairach.xfm file referred to in the MRI volume.
+    The fixed transforms :math:`T_-` and :math:`T_+` will
+    also be created.
+
+``--out <*name*>``
+
+    Specifies the output file. If this option is not present, the collected transformations
+    will be output on screen but not saved.
+
+
+.. _mne_compensate_data:
+
+mne_compensate_data
+===================
+
+``--in <*name*>``
+
+    Specifies the input data file.
+
+``--out <*name*>``
+
+    Specifies the output data file.
+
+``--grad <*number*>``
+
+    Specifies the desired compensation grade in the output file. The value
+    can be 1, 2, 3, or 101. The values starting from 101 will be used
+    for 4D Magnes compensation matrices.
+
+.. note:: Only average data is included in the output. Evoked-response data files produced with mne_browse_raw or mne_process_raw may include standard errors of mean, which cannot be re-compensated using the above method and are thus omitted.
+
+.. note:: Raw data cannot be compensated using mne_compensate_data. For this purpose, load the data into mne_browse_raw or mne_process_raw, specify the desired compensation grade, and save a new raw data file.
+
+
+.. _mne_copy_processing_history:
+
+mne_copy_processing_history
+===========================
+
+In order for the inverse operator calculation to work correctly
+with data processed with the Elekta-Neuromag Maxfilter (TM) software,
+the so-called *processing history* block must
+be included in data files. Previous versions of the MNE Matlab functions
+did not copy the processing history to saved files. As of March 30,
+2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have
+been enhanced to include these data in the output file as appropriate.
+If you have older raw data files created in Matlab from input which
+has been processed with Maxfilter, it is necessary to copy the *processing
+history* block from the original to the modified raw data
+file using the mne_copy_processing_history utility described
+below. The raw data processing programs mne_browse_raw and mne_process_raw have
+handled copying of the processing history since revision 2.5 of
+the MNE software.
+
+mne_copy_processing_history is
+simple to use:
+
+``mne_copy_processing_history --from`` <*from*> ``--to`` <*to*>,
+
+where  <*from*> is an
+original raw data file containing the processing history and  <*to*> is
+a file output with older MNE Matlab routines. Be careful: this operation
+cannot be undone. If the  <*from*> file
+does not have the processing history block or the  <*to*> file
+already has it, the destination file remains unchanged.
+
+
+.. _mne_convert_dig_data:
+
+mne_convert_dig_data
+====================
+
+Converts Polhemus digitization data between different file formats.
+The input formats are:
+
+``fif``
+
+    The
+    standard format used in MNE. The digitization data are typically
+    present in the measurement files.
+
+``hpts``
+
+    A text format which is a translation
+    of the fif format data, see :ref:`CJADJEBH` below.
+
+``elp``
+
+    A text format produced by the *Source
+    Signal Imaging, Inc.* software. For a description of this "probe" format,
+    see http://www.sourcesignal.com/formats_probe.html.
+
+The data can be output in fif and hpts formats.
+Only the last command-line option specifying an input file will
+be honored. Zero or more output file options can be present on the
+command line.
+
+.. note:: The elp and hpts input files may contain textual EEG electrode labels. They will not be copied to the fif format output.
+
+The command-line options of mne_convert_dig_data are:
+
+``--fif <*name*>``
+
+    Specifies the name of an input fif file.
+
+``--hpts <*name*>``
+
+    Specifies the name of an input hpts file.
+
+``--elp <*name*>``
+
+    Specifies the name of an input elp file.
+
+``--fifout <*name*>``
+
+    Specifies the name of an output fif file.
+
+``--hptsout <*name*>``
+
+    Specifies the name of an output hpts file.
+
+``--headcoord``
+
+    By default, the fif and hpts input
+    files are assumed to contain data in the MNE head coordinate system,
+    see :ref:`BJEBIBAI`. With this option present, the data are
+    transformed to the MNE head coordinate system with help of the fiducial
+    locations in the data. Use this option if this is not the case or
+    if you are unsure about the definition of the coordinate system
+    of the fif and hpts input
+    data. This option is implied with elp input
+    files. If this option is present, the fif format output file will contain
+    the transformation between the original digitizer data coordinates
+    and the MNE head coordinate system.
+
+.. _CJADJEBH:
+
+The hpts format
+---------------
+
+The hpts format digitizer
+data file may contain comment lines starting with the pound sign
+(#) and data lines of the form:
+
+ <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
+
+where
+
+`` <*category*>``
+
+    defines the type of points. Allowed categories are: ``hpi``, ``cardinal``
+    (``fiducial``), ``eeg``, and ``extra``, corresponding to head-position
+    indicator coil locations, cardinal landmarks, EEG electrode locations,
+    and additional head surface points, respectively. Note that tkmedit does not
+    recognize ``fiducial`` as an
+    alias for ``cardinal``.
+
+`` <*identifier*>``
+
+    identifies the point. The identifiers are usually sequential numbers. For
+    cardinal landmarks, 1 = left auricular point, 2 = nasion, and 3
+    = right auricular point. For EEG electrodes, identifier = 0 signifies
+    the reference electrode. Some programs (not tkmedit)
+    accept electrode labels as identifiers in the ``eeg`` category.
+
+`` <*x/mm*> , <*y/mm*> , <*z/mm*>``
+
+    Location of the point, usually in the MEG head coordinate system, see :ref:`BJEBIBAI`.
+    Some programs have options to accept coordinates in meters instead
+    of millimeters. With ``--meters`` option, mne_transform_points lists
+    the coordinates in meters.
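+
+For illustration, a minimal Python parser for this format could look as
+follows (a sketch based on the description above; the function is
+hypothetical and not part of the MNE distribution)::
+
+    def read_hpts(fname):
+        """Parse an hpts file into (category, identifier, x, y, z) tuples."""
+        points = []
+        with open(fname) as fid:
+            for line in fid:
+                line = line.strip()
+                if not line or line.startswith('#'):  # skip comments
+                    continue
+                category, identifier, x, y, z = line.split()[:5]
+                points.append((category, identifier,
+                               float(x), float(y), float(z)))  # mm
+        return points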
+
+
+.. _mne_convert_lspcov:
+
+mne_convert_lspcov
+==================
+
+The utility mne_convert_lspcov converts a LISP-format noise-covariance file,
+produced by the Neuromag signal processor graph, into fif format.
+
+The command-line options are:
+
+``--lspcov <*name*>``
+
+    The LISP noise-covariance matrix file to be converted.
+
+``--meas <*name*>``
+
+    A fif format measurement file used to assign channel names to the noise-covariance
+    matrix elements. This file should have precisely the same channel
+    order within MEG and EEG as the LISP-format covariance matrix file.
+
+``--out <*name*>``
+
+    The name of a fif format output file. The file name should end with
+    ``-cov.fif`` .
+
+``--outasc <*name*>``
+
+    The name of a text format output file. No information about the channel
+    names is included. The covariance matrix is listed row by row.
+    This file can be loaded into MATLAB, for example.
+
+
+.. _mne_convert_ncov:
+
+mne_convert_ncov
+================
+
+The ncov file format was previously used to store noise-covariance
+matrices. The MNE software requires that the covariance matrix
+files are in fif format. The utility mne_convert_ncov converts
+ncov files to fif format.
+
+The command-line options are:
+
+``--ncov <*name*>``
+
+    The ncov file to be converted.
+
+``--meas <*name*>``
+
+    A fif format measurement file used to assign channel names to the noise-covariance
+    matrix elements. This file should have precisely the same channel
+    order within MEG and EEG as the ncov file. Typically, both the ncov
+    file and the measurement file are created by the now mature off-line
+    averager, meg_average.
+
+
+.. _mne_convert_surface:
+
+mne_convert_surface
+===================
+
+The utility mne_convert_surface converts
+surface data files between different formats.
+
+.. note:: The MNE Matlab toolbox functions enable reading of FreeSurfer surface files directly. Therefore, the ``--mat`` option has been removed. The dfs file format conversion functionality has been moved here from mne_convert_dfs. Consequently, mne_convert_dfs has been removed from the MNE software.
+
+.. _BABEABAA:
+
+Command-line options
+--------------------
+
+mne_convert_surface accepts
+the following command-line options:
+
+``--fif <*name*>``
+
+    Specifies a fif format input file. The first surface (source space)
+    from this file will be read.
+
+``--tri <*name*>``
+
+    Specifies a text format input file. The format of this file is described in :ref:`BEHDEFCD`.
+
+``--meters``
+
+    The unit of measure for the vertex locations in text input files
+    is meters instead of the default millimeters. This option does not
+    have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+``--swap``
+
+    Swap the ordering of the triangle vertices. The standard convention in
+    the MNE software is to have the vertices in text format files ordered
+    so that the vector cross product of the vectors from vertex 1 to
+    2 and 1 to 3 gives the direction of the outward surface normal. This
+    is also called the counterclockwise ordering. If your text input file
+    does not comply with this right-hand rule, use the ``--swap`` option.
+    This option does not have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+``--surf <*name*>``
+
+    Specifies a FreeSurfer format
+    input file.
+
+``--dfs <*name*>``
+
+    Specifies the name of a dfs file to be converted. The surfaces produced
+    by BrainSuite are in the dfs format.
+
+``--mghmri <*name*>``
+
+    Specifies a mgh/mgz format MRI data file which will be used to define
+    the coordinate transformation to be applied to the data read from
+    a dfs file to bring it to the FreeSurfer MRI
+    coordinates, *i.e.*, the coordinate system of
+    the MRI stack in the file. In addition, this option can be used
+    to insert "volume geometry" information to the FreeSurfer
+    surface file output (``--surfout`` option). If the input file already
+    contains the volume geometry information, ``--replacegeom`` is needed
+    to override the input volume geometry and to proceed to writing
+    the data.
+
+``--replacegeom``
+
+    Replaces existing volume geometry information. Used in conjunction
+    with the ``--mghmri`` option described above.
+
+``--fifmri <*name*>``
+
+    Specifies a fif format MRI description file which will be used to define
+    the coordinate transformation to be applied to the data read from
+    a dfs file to bring it to the same coordinate system as the MRI stack
+    in the file.
+
+``--trans <*name*>``
+
+    Specifies the name of a text file which contains the coordinate
+    transformation to be applied to the data read from the dfs file
+    to bring it to the MRI coordinates, see below. This option is rarely
+    needed.
+
+``--flip``
+
+    By default, the dfs surface nodes are assumed to be in a right-anterior-superior
+    (RAS) coordinate system with its origin at the left-posterior-inferior
+    (LPI) corner of the MRI stack. Sometimes the dfs file has left and
+    right flipped. This option reverses this flip, *i.e.*,
+    assumes the surface coordinate system is left-anterior-superior
+    (LAS) with its origin in the right-posterior-inferior (RPI) corner
+    of the MRI stack.
+
+``--shift <*value/mm*>``
+
+    Shift the surface vertices in the direction of the surface normals
+    by this amount before saving the surface.
+
+``--surfout <*name*>``
+
+    Specifies a FreeSurfer format output file.
+
+``--fifout <*name*>``
+
+    Specifies a fif format output file.
+
+``--triout <*name*>``
+
+    Specifies an ASCII output file that will contain the surface data
+    in the triangle file format described in :ref:`BEHDEFCD`.
+
+``--pntout <*name*>``
+
+    Specifies an ASCII output file which will contain the vertex numbers only.
+
+``--metersout``
+
+    With this option the ASCII output will list the vertex coordinates
+    in meters instead of millimeters.
+
+``--swapout``
+
+    Defines the vertex ordering of ASCII triangle files to be output.
+    For details, see ``--swap`` option, above.
+
+``--smfout <*name*>``
+
+    Specifies a smf (Simple Model Format) output file. For details of this
+    format, see http://people.sc.fsu.edu/~jburkardt/data/smf/smf.txt.
+
+.. note:: Multiple output options can be specified to produce outputs in several different formats with a single invocation of mne_convert_surface.
+
+The coordinate transformation file specified with the ``--trans`` option should contain
+a 4 x 4 coordinate transformation matrix:
+
+.. math::    T = \begin{bmatrix}
+        R_{11} & R_{12} & R_{13} & x_0 \\
+        R_{21} & R_{22} & R_{23} & y_0 \\
+        R_{31} & R_{32} & R_{33} & z_0 \\
+        0 & 0 & 0 & 1
+        \end{bmatrix}
+
+defined so that if the augmented location vectors in the
+dfs file and MRI coordinate systems are denoted by :math:`r_{dfs} = [x_{dfs} y_{dfs} z_{dfs} 1]^T` and :math:`r_{MRI} = [x_{MRI} y_{MRI} z_{MRI} 1]^T`,
+respectively,
+
+.. math::    r_{MRI} = Tr_{dfs}
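+
+In NumPy terms, applying this transformation to a point is a single
+matrix-vector product (a sketch; ``T`` is the 4 x 4 matrix read from the
+``--trans`` file)::
+
+    import numpy as np
+
+    def apply_trans(T, r_dfs):
+        """Map a dfs-coordinate point (x, y, z) to MRI coordinates."""
+        r = np.append(np.asarray(r_dfs, dtype=float), 1.0)  # augment with 1
+        return (T @ r)[:3]                                  # drop the 1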
+
+
+.. _mne_cov2proj:
+
+mne_cov2proj
+============
+
+Purpose
+-------
+
+The utility mne_cov2proj picks
+eigenvectors from a covariance matrix and outputs them as a signal-space
+projection (SSP) file.
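+
+Conceptually, this amounts to an eigenvalue decomposition of the
+covariance matrix followed by selection of eigenvectors. A rough NumPy
+sketch of the idea (not the actual implementation)::
+
+    import numpy as np
+
+    def cov_to_proj(C, include):
+        """Pick eigenvectors of covariance C as projection vectors.
+
+        include -- indices of eigenvectors, counted from the largest
+                   eigenvalue down (cf. the --include option)
+        """
+        eigval, eigvec = np.linalg.eigh(C)  # ascending eigenvalues
+        order = np.argsort(eigval)[::-1]    # largest first
+        return eigvec[:, order[list(include)]].T  # one vector per row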
+
+Command line options
+--------------------
+
+mne_cov2proj accepts the
+following command-line options:
+
+``--cov  <*name*>``
+
+    The covariance matrix file to be used as a source. The covariance matrix
+    files usually end with ``-cov.fif`` .
+
+``--proj  <*name*>``
+
+    The output file to contain the projection. It is recommended that
+    the file name ends with ``-proj.fif`` .
+
+``--bad  <*name*>``
+
+    Specify channels not to be included when an eigenvalue decomposition
+    of the covariance matrix is computed.
+
+``--include  <*val1*> [: <*val2*> ]``
+
+    Select an eigenvector or a range of eigenvectors to include. It
+    is recommended that magnetometers, gradiometers, and EEG data are handled
+    separately with help of the ``--bad`` , ``--meg`` , ``--megmag`` , ``--meggrad`` ,
+    and ``--eeg`` options.
+
+``--meg``
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG channels are included.
+
+``--eeg``
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to EEG channels are included.
+
+``--megmag``
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG magnetometer channels are included.
+
+``--meggrad``
+
+    After loading the covariance matrix, modify it so that only elements corresponding
+    to MEG planar gradiometer channels are included.
+
+.. note:: The ``--megmag`` and ``--meggrad`` options employ the Vectorview channel numbering scheme to recognize MEG magnetometers (channel names ending with '1') and planar gradiometers (other channels). Therefore, these options are only meaningful in conjunction with data acquired with a Neuromag Vectorview system.
+
+
+.. _mne_create_comp_data:
+
+mne_create_comp_data
+====================
+
+``--in <*name*>``
+
+    Specifies the input text file containing the compensation data.
+
+``--kind <*value*>``
+
+    The compensation type to be stored in the output file with the data. This
+    value defaults to 101 for the Magnes compensation and does not need
+    to be changed.
+
+``--out <*name*>``
+
+    Specifies the output fif file containing the compensation channel weight
+    matrix :math:`C_{(k)}`, see :ref:`BEHDDFBI`.
+
+The format of the text-format compensation data file is:
+
+ <*number of MEG helmet channels*> <*number of compensation channels included*>
+ <*cname_1*> <*cname_2*> ...
+ <*name_1*> <*weights*>
+ <*name_2*> <*weights*> ...
+
+In the above, <*name_k*> denote
+the names of MEG helmet channels and <*cname_k*>
+those of the compensation channels, respectively. If the channel
+names contain spaces, they must be surrounded by quotes, for example, ``"MEG 0111"`` .
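+
+As an illustration of this format, the following Python sketch writes such
+a file (a hypothetical helper, not part of MNE)::
+
+    def write_comp_data(fname, comp_names, weights):
+        """weights maps a helmet channel name to a list of
+        len(comp_names) compensation weights."""
+        def quote(name):  # quote names that contain spaces
+            return '"%s"' % name if ' ' in name else name
+        with open(fname, 'w') as fid:
+            fid.write('%d %d\n' % (len(weights), len(comp_names)))
+            fid.write(' '.join(quote(c) for c in comp_names) + '\n')
+            for name, w in weights.items():
+                fid.write('%s %s\n'
+                          % (quote(name), ' '.join('%g' % x for x in w)))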
+
+
+
+.. _mne_ctf2fiff:
+
+mne_ctf2fiff
+============
+
+``--verbose``
+
+    Produce a verbose listing of the conversion process to stdout.
+
+``--ds <*directory*>``
+
+    Read the data from this directory.
+
+``--omit <*filename*>``
+
+    Read the names of channels to be omitted from this text file. Enter one
+    channel name per line. The names should match exactly with those
+    listed in the CTF data structures. By default, all channels are included.
+
+``--fif <*filename*>``
+
+    The name of the output file. If the length of the raw data exceeds
+    the 2-GByte fif file limit, several output files will be produced.
+    These additional 'extension' files will be tagged
+    with ``_001.fif`` , ``_002.fif`` , etc.
+
+``--evoked``
+
+    Produce an evoked-response fif file instead of a raw data file.
+    Each trial in the CTF data file is included as a separate category
+    (condition). The maximum number of samples in each trial is limited
+    to 25000.
+
+``--infoonly``
+
+    Write only the measurement info to the output file, do not include data.
+
+During conversion, the following files are consulted from
+the ds directory:
+
+`` <*name*> .res4``
+
+    This file contains most of the header information pertaining to the acquisition.
+
+`` <*name*> .hc``
+
+    This file contains the HPI coil locations in sensor and head coordinates.
+
+`` <*name*> .meg4``
+
+    This file contains the actual MEG data. If the data are split across several
+    files due to the 2-GByte file size restriction, the 'extension' files
+    are called <*name*> ``.`` <*number*> ``_meg4`` .
+
+`` <*name*> .eeg``
+
+    This is an optional input file containing the EEG electrode locations. More
+    details are given below.
+
+If the <*name*> ``.eeg`` file,
+produced from the Polhemus data file with CTF software, is present,
+it is assumed to contain lines with the format:
+
+ <*number*> <*name*> <*x/cm*> <*y/cm*> <*z/cm*>
+
+The field <*number*> is
+a sequential number to be assigned to the converted data point in
+the fif file. <*name*> is either
+a name of an EEG channel, one of ``left`` , ``right`` ,
+or ``nasion`` to indicate a fiducial landmark, or any word
+which is not a name of any channel in the data. If <*name*> is
+a name of an EEG channel available in the data, the location is
+included in the Polhemus data as an EEG electrode location and
+inserted as the location of that EEG electrode. If the name is one
+of the fiducial landmark names, the point is included in the Polhemus
+data as a fiducial landmark. Otherwise, the point is included as
+an additional head surface point.
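+
+The classification rule can be summarized in a few lines of Python (a
+sketch of the logic described above, with hypothetical names)::
+
+    FIDUCIALS = ('left', 'right', 'nasion')
+
+    def classify_point(name, eeg_channel_names):
+        """Decide how one line of the .eeg file is interpreted."""
+        if name in FIDUCIALS:
+            return 'fiducial landmark'
+        if name in eeg_channel_names:
+            return 'EEG electrode location'
+        return 'additional head surface point'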
+
+The standard ``eeg`` file produced by CTF software
+does not contain the fiducial locations. If desired, they can be
+manually copied from the ``pos`` file which was the source
+of the ``eeg`` file.
+
+.. note:: In newer CTF data the EEG position information may be present in the ``res4`` file. If the ``eeg`` file is present, the positions given there take precedence over the information in the ``res4`` file.
+
+.. note:: mne_ctf2fiff converts both epoch-mode and continuous raw data files into raw data fif files. It is not advisable to use epoch-mode files with time gaps between the epochs because the data will be discontinuous in the resulting fif file, with jumps at the junctions between epochs. These discontinuities produce artefacts if the raw data is filtered in mne_browse_raw, mne_process_raw, or graph.
+
+.. note:: The conversion process includes a transformation from the CTF head coordinate system convention to that used in the Neuromag systems.
+
+
+.. _mne_ctf_dig2fiff:
+
+mne_ctf_dig2fiff
+================
+
+The input data to mne_ctf_dig2fiff is
+a text file, which contains the coordinates of the digitization
+points in centimeters. The first line should contain a single number
+which is the number of points listed in the file. Each of the following
+lines contains a sequential number of the point, followed by the
+three coordinates. mne_ctf_dig2fiff ignores
+any text following the :math:`z` coordinate
+on each line. If the ``--numfids`` option is specified,
+the first three points indicate the three fiducial locations (1
+= nasion, 2 = left auricular point, 3 = right auricular point).
+Otherwise, the input file must end with three lines beginning with ``left`` , ``right`` ,
+or ``nasion`` to indicate the locations of the fiducial
+landmarks, respectively.
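+
+For example, a digitization file following the labeled-fiducials
+convention could be produced with a few lines of Python (a sketch;
+whether the count on the first line includes the fiducial lines is an
+assumption here)::
+
+    def write_dig_text(fname, points, fiducials):
+        """points    -- list of (x, y, z) tuples in cm
+        fiducials -- dict with keys 'left', 'right', 'nasion'"""
+        with open(fname, 'w') as fid:
+            fid.write('%d\n' % len(points))  # number of numbered points
+            for k, (x, y, z) in enumerate(points, start=1):
+                fid.write('%d %.3f %.3f %.3f\n' % (k, x, y, z))
+            for label in ('left', 'right', 'nasion'):
+                x, y, z = fiducials[label]
+                fid.write('%s %.3f %.3f %.3f\n' % (label, x, y, z))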
+
+.. note:: The sequential numbers should be unique within a file. In particular, the numbers 1, 2, and 3 must not appear more than once if the ``--numfids`` option is used.
+
+The command-line options for mne_ctf_dig2fiff are:
+
+``--dig <*name*>``
+
+    Specifies the input data file in CTF output format.
+
+``--numfids``
+
+    Fiducial locations are numbered instead of labeled, see above.
+
+``--hpts <*name*>``
+
+    Specifies the output hpts file. The format of this text file is
+    described in :ref:`CJADJEBH`.
+
+``--fif <*name*>``
+
+    Specifies the output fif file.
+
+
+.. _mne_dicom_essentials:
+
+mne_dicom_essentials
+====================
+
+Print essential information about a dicom file.
+
+``--in <name>``
+
+    The input file.
+
+
+.. _mne_edf2fiff:
+
+mne_edf2fiff
+============
+
+The utility mne_edf2fiff allows
+conversion of EEG data from EDF, EDF+, and BDF formats to the fif
+format. Documentation for these three input formats can be found
+at:
+
+``EDF:``
+
+    http://www.edfplus.info/specs/edf.html
+
+``EDF+:``
+
+    http://www.edfplus.info/specs/edfplus.html
+
+``BDF:``
+
+    http://www.biosemi.com/faq/file_format.htm
+
+EDF (European Data Format) and EDF+ are 16-bit formats while
+BDF is a 24-bit variant of this format used by the EEG systems manufactured
+by a company called BioSemi.
+
+None of these formats support electrode location information
+or head shape digitization information. Therefore, this information
+has to be provided separately. Presently hpts and elp file formats
+are supported to include digitization data. For information on these
+formats, see :ref:`CJADJEBH` and http://www.sourcesignal.com/formats_probe.html.
+Note that it is mandatory to have the three fiducial locations (nasion
+and the two auricular points) included in the digitization data.
+Using the locations of the fiducial points the digitization data
+are converted to the MEG head coordinate system employed in the
+MNE software, see :ref:`BJEBIBAI`. In the comparison of the
+channel names only the initial segment up to the first '-' (dash)
+in the EDF/EDF+/BDF channel name is significant.
+
+The EDF+ files may contain an annotation channel which can
+be used to store trigger information. The Time-stamped Annotation
+Lists (TALs) in the annotation data can be converted to a trigger
+channel (STI 014) using an annotation map file which associates
+an annotation label with a number on the trigger channel. The TALs
+can be listed with the ``--listtal`` option,
+see below.
+
+.. warning:: The data samples in a BDF file are represented in a 3-byte (24-bit) format. Since 3-byte raw data buffers are not presently supported in the fif format, these data will be changed to 4-byte integers in the conversion. Since the maximum size of a fif file is 2 GBytes, the maximum size of a BDF file to be converted is approximately 1.5 GBytes.
+
+.. warning:: The EDF/EDF+/BDF formats support channel-dependent sampling rates. This feature is not supported by mne_edf2fiff. However, the annotation channel in the EDF+ format can have a different sampling rate. The annotation channel data is not included in the fif output files.
+
+Using mne_edf2fiff
+------------------
+
+The command-line options of mne_edf2fiff are:
+
+``--edf <*filename*>``
+
+    Specifies the name of the raw data file to process.
+
+``--tal <*filename*>``
+
+    List the time-stamped annotation list (TAL) data from an EDF+ file to
+    this file. This output is useful to assist in creating the annotation map
+    file, see the ``--annotmap`` option, below.
+    This output file is an event file compatible with mne_browse_raw and mne_process_raw,
+    see :ref:`ch_browse`. In addition, the mapping between TAL
+    labels and trigger numbers provided by the ``--annotmap`` option is
+    employed to assign trigger numbers in the event file produced. In
+    the absence of the ``--annotmap`` option, the default trigger number 1024
+    is used.
+
+``--annotmap <*filename*>``
+
+    Specify a file which maps the labels of the TALs to numbers on a trigger
+    channel (STI 014) which will be added to the output file if this
+    option is present. This annotation map file
+    may contain comment lines starting with the '%' or '#' characters.
+    The data lines contain a label-number pair, separated by a colon.
+    For example, a line 'Trigger-1:9' means that each
+    annotation labeled with the text 'Trigger-1' will
+    be translated to the number 9 on the trigger channel.
+
+``--elp <*filename*>``
+
+    Specifies the name of an electrode location file. This file
+    is in the "probe" file format used by the *Source
+    Signal Imaging, Inc.* software. For a description of the
+    format, see http://www.sourcesignal.com/formats_probe.html. Note
+    that some other software packages may produce electrode-position
+    files with the elp ending not
+    conforming to the above specification. As discussed above, the fiducial
+    marker locations, optional in the "probe" file
+    format specification, are mandatory for mne_edf2fiff.
+    When this option is encountered on the command line any previously
+    specified hpts file will be ignored.
+
+``--hpts <*filename*>``
+
+    Specifies the name of an electrode position file in  the hpts format discussed
+    in :ref:`CJADJEBH`. The mandatory entries are the fiducial marker
+    locations and the EEG electrode locations. It is recommended that
+    electrode (channel) names instead of numbers are used to label the
+    EEG electrode locations. When this option is encountered on the
+    command line any previously specified elp file
+    will be ignored.
+
+``--meters``
+
+    Assumes that the digitization data in an hpts file
+    is given in meters instead of millimeters.
+
+``--fif <*filename*>``
+
+    Specifies the name of the fif file to be output.
+
+Post-conversion tasks
+---------------------
+
+This section outlines additional steps to be taken once an
+EDF/EDF+/BDF file has been converted to the fif format for use in MNE:
+
+- Some of the channels may not have a
+  digitized electrode location associated with them. If these channels
+  are used for EOG or EMG measurements, their channel types should
+  be changed to the correct ones using the :ref:`mne_rename_channels` utility.
+  EEG channels which do not have a location
+  associated with them should be assigned to be MISC channels.
+
+- After the channel types are correctly defined, a topographical
+  layout file can be created for mne_browse_raw and mne_analyze using
+  the :ref:`mne_make_eeg_layout` utility.
+
+- The trigger channel name in BDF files is "Status".
+  This must be specified with the ``--digtrig`` option or with help of
+  the MNE_TRIGGER_CH_NAME environment variable when :ref:`mne_browse_raw` or
+  :ref:`mne_process_raw` is invoked.
+
+- Only the two least significant bytes on the "Status" channel
+  of BDF files are significant as trigger information. The ``--digtrigmask``
+  0xff option or the MNE_TRIGGER_CH_MASK environment variable should be used
+  to specify this to :ref:`mne_browse_raw` and :ref:`mne_process_raw`.
+
+
+.. _mne_epochs2mat:
+
+mne_epochs2mat
+==============
+
+The utility mne_epochs2mat converts
+epoch data including all or selected channels from a raw data file
+to a simple binary file with an associated description file in Matlab
+mat file format. With help of the description file, a matlab program
+can easily read the epoch data from the simple binary file. Signal
+space projection and bandpass filtering can be optionally applied
+to the raw data prior to saving the epochs.
+
+.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without converting them with mne_epochs2mat first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_epochs2mat, which creates large files occupying disk space unnecessarily. An exception to this is the case where you apply a filter to the data and save the band-pass filtered epochs.
+
+Command-line options
+--------------------
+
+mne_epochs2mat accepts
+the following command-line options:
+
+``--raw <*name*>``
+
+    Specifies the name of the raw data fif file to use as input.
+
+``--mat <*name*>``
+
+    Specifies the name of the destination file. Anything following the last
+    period in the file name will be removed before composing the output
+    file name. The binary epoch file will be called <*trimmed name*> ``.epochs`` and
+    the corresponding Matlab description file will be <*trimmed name*> ``_desc.mat`` .
+
+``--tag <*tag*>``
+
+    By default, all Matlab variables included in the description file
+    start with ``mne\_``. This option changes the prefix to <*tag*>_.
+
+``--events <*name*>``
+
+    The file containing the event definitions. This can be a text or
+    fif format file produced by :ref:`mne_process_raw` or
+    :ref:`mne_browse_raw`. With help of this file it is possible
+    to select virtually any data segment from the raw data file. If
+    this option is missing, the digital trigger channel in the raw data
+    file or a fif format event file produced automatically by mne_process_raw or mne_browse_raw is
+    consulted for event information.
+
+``--event <*name*>``
+
+    Event number identifying the epochs of interest.
+
+``--tmin <*time/ms*>``
+
+    The starting point of the epoch with respect to the event of interest.
+
+``--tmax <*time/ms*>``
+
+    The endpoint of the epoch with respect to the event of interest.
+
+``--sel <*name*>``
+
+    Specifies a text file which contains the names of the channels to include
+    in the output file, one channel name per line. If the ``--inv`` option
+    is specified, ``--sel`` is ignored. If neither ``--inv`` nor ``--sel`` is
+    present, all MEG and EEG channels are included. The digital trigger
+    channel can be included with the ``--includetrig`` option, described
+    below.
+
+``--inv <*name*>``
+
+    Specifies an inverse operator, which will be employed in two ways. First,
+    the channels included to output will be those included in the inverse
+    operator. Second, any signal-space projection operator present in
+    the inverse operator file will be applied to the data. This option
+    cancels the effect of ``--sel`` and ``--proj`` options.
+
+``--digtrig <*name*>``
+
+    Name of the composite digital trigger channel. The default value
+    is 'STI 014'. Underscores in the channel name
+    will be replaced by spaces.
+
+``--digtrigmask <*number*>``
+
+    Mask to be applied to the trigger channel values before considering them.
+    This option is useful if one wants to set some bits in a don't care
+    state. For example, some finger response pads keep the trigger lines
+    high if not in use, *i.e.*, a finger is not in
+    place. Yet, it is convenient to keep these devices permanently connected
+    to the acquisition system. The number can be given in decimal or
+    hexadecimal format (beginning with 0x or 0X). For example, the value
+    255 (0xFF) means that only the lowest order byte (usually trigger
+    lines 1 - 8 or bits 0 - 7) will be considered.
+
+``--includetrig``
+
+    Add the digital trigger channel to the list of channels to output.
+    This option should not be used if the trigger channel is already
+    included in the selection specified with the ``--sel`` option.
+
+``--filtersize <*size*>``
+
+    Adjust the length of the FFT to be applied in filtering. The number will
+    be rounded up to the next power of two. If the size is :math:`N`,
+    the corresponding length of time is :math:`^N/_{f_s}`,
+    where :math:`f_s` is the sampling frequency
+    of your data. The filtering procedure includes overlapping tapers
+    of length :math:`^N/_2` so that the total FFT
+    length will actually be :math:`2N`. The default
+    value is 4096.
+
+``--highpass <*value/Hz*>``
+
+    Highpass filter frequency limit. If this is too low with respect
+    to the selected FFT length and data file sampling frequency, the
+    data will not be highpass filtered. You can experiment with the
+    interactive version to find the lowest applicable filter for your
+    data. This value can be adjusted in the interactive version of the
+    program. The default is 0, i.e., no highpass filter in effect.
+
+``--highpassw <*value/Hz*>``
+
+    The width of the transition band of the highpass filter. The default
+    is 6 frequency bins, where one bin is :math:`^{f_s}/_{(2N)}`.
+
+``--lowpass <*value/Hz*>``
+
+    Lowpass filter frequency limit. This value can be adjusted in the interactive
+    version of the program. The default is 40 Hz.
+
+``--lowpassw <*value/Hz*>``
+
+    The width of the transition band of the lowpass filter. This value
+    can be adjusted in the interactive version of the program. The default
+    is 5 Hz.
+
+``--filteroff``
+
+    Do not filter the data.
+
+``--proj <*name*>``
+
+    Include signal-space projection (SSP) information from this file.
+    If the ``--inv`` option is present, ``--proj`` has
+    no effect.
+
+.. note:: Baseline has not been subtracted from the epochs. This has to be done in subsequent processing with Matlab if so desired.
+
+.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
+
+.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_browse_raw or mne_process_raw. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
+
+.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
+
+The binary epoch data file
+--------------------------
+
+mne_epochs2mat saves the
+epoch data extracted from the raw data file in a simple binary file.
+The data are stored as big-endian single-precision floating point
+numbers. Assuming that each of the total of :math:`p` epochs
+contains :math:`n` channels and :math:`m` time
+points, the data :math:`s_{jkl}` are ordered
+as
+
+.. math::    s_{111} \dotso s_{1n1} s_{211} \dotso s_{mn1} \dotso s_{mnp}\ ,
+
+where the first index stands for the time point, the second
+for the channel, and the third for the epoch number, respectively.
+The data are not calibrated, i.e., the calibration factors present
+in the Matlab description file have to be applied to get to physical
+units as described below.
+
+.. note:: The maximum size of an epoch data file is 2 Gbytes, *i.e.*, 0.5 Gsamples.
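+
+Although the description file is intended for Matlab, the layout makes
+the epoch file easy to read from NumPy as well. A minimal sketch,
+assuming all epochs were stored as big-endian single-precision floats
+with a common length (variable names follow the tables below)::
+
+    import numpy as np
+
+    def read_epochs(epoch_file, nepoch, nchan, nsamp, cals):
+        """Return calibrated epochs of shape (nepoch, nsamp, nchan).
+
+        cals -- per-channel product of the range and cal columns
+                of ch_cals
+        """
+        data = np.fromfile(epoch_file, dtype='>f4',
+                           count=nepoch * nsamp * nchan)
+        # channel index varies fastest, then time, then epoch
+        data = data.reshape(nepoch, nsamp, nchan)
+        return data * cals  # broadcast over the channel axis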
+
+Matlab data structures
+----------------------
+
+The Matlab description files output by mne_epochs2mat contain
+a data structure <*tag*>_epoch_info .
+The fields of this structure are listed in :ref:`BEHIFJIJ`.
+Further explanation of the epochs member
+is provided in :ref:`BEHHAGHE`.
+
+
+.. tabularcolumns:: |p{0.15\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
+.. _BEHIFJIJ:
+.. table:: The fields of the raw data info structure.
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | orig_file             | string          | The name of the original fif file specified with the       |
+    |                       |                 | ``--raw`` option.                                          |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | epoch_file            | string          | The name of the epoch data file produced by mne_epochs2mat.|
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nchan                 | 1               | Number of channels.                                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nepoch                | 1               | Total number of epochs.                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | epochs                | nepoch x 5      | Description of the content of the epoch data file,         |
+    |                       |                 | see :ref:`BEHHAGHE`.                                       |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
+    |                       |                 | The unit of the data is listed in the first column         |
+    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
+    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate        |
+    |                       |                 | system. For EEG electrodes the first vector after the      |
+    |                       |                 | electrode location specifies the location of the reference |
+    |                       |                 | electrode. If the reference is not specified this value is |
+    |                       |                 | all zeroes. The remaining unit vectors are irrelevant for  |
+    |                       |                 | EEG electrodes.                                            |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | The epoch data output by mne_epochs2mat are not calibrated.|
+    |                       |                 | The first column is the range member of the fiff data      |
+    |                       |                 | structures while the second is the cal member. To          |
+    |                       |                 | get calibrated data values in the units given in           |
+    |                       |                 | ch_units from the raw data, the data must be multiplied    |
+    |                       |                 | with the product of range and cal .                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates.                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
+.. _BEHHAGHE:
+.. table:: The epochs member of the raw data info structure.
+
+    +---------------+------------------------------------------------------------------+
+    | Column        | Contents                                                         |
+    +---------------+------------------------------------------------------------------+
+    | 1             | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte   |
+    |               | signed integer, 4 = single-precision float). The epoch data are  |
+    |               | written using the big-endian byte order. The data are stored     |
+    |               | sample by sample.                                                |
+    +---------------+------------------------------------------------------------------+
+    | 2             | Byte location of this epoch in the binary epoch file.            |
+    +---------------+------------------------------------------------------------------+
+    | 3             | First sample of this epoch in the original raw data file.        |
+    +---------------+------------------------------------------------------------------+
+    | 4             | First sample of the epoch with respect to the event.             |
+    +---------------+------------------------------------------------------------------+
+    | 5             | Number of samples in the epoch.                                  |
+    +---------------+------------------------------------------------------------------+
+
+.. note:: For source modelling purposes, it is recommended that the MNE Matlab toolbox (see :ref:`ch_matlab`) is employed to read the measurement info instead of using the channel information in the raw data info structure described in :ref:`BEHIFJIJ`.
+
+
+.. _mne_evoked_data_summary:
+
+mne_evoked_data_summary
+=======================
+
+Print a summary of evoked-response data sets in a file (averages only).
+
+``--in <name>``
+
+    The input file.
+
+
+.. _mne_eximia2fiff:
+
+mne_eximia2fiff
+===============
+
+Usage:
+
+``mne_eximia2fiff`` [``--dig`` dfile] [``--orignames``] file1 file2 ...
+
+where file1 file2 ...
+are eXimia ``nxe`` files and the ``--orignames`` option
+is passed on to :ref:`mne_brain_vision2fiff`.
+If you want to convert all data files in a directory, say
+
+``mne_eximia2fiff *.nxe``
+
+The optional file specified with the ``--dig`` option is assumed
+to contain digitizer data from the recording in the Nexstim format.
+The resulting fif data file will contain these data converted to
+the fif format as well as the coordinate transformation between
+the eXimia digitizer and MNE head coordinate systems.
+
+.. note:: This script converts raw data files only.
+
+
+.. _mne_fit_sphere_to_surf:
+
+mne_fit_sphere_to_surf
+======================
+
+Purpose
+-------
+
+The utility mne_fit_sphere_to_surf finds
+the sphere which best fits a given surface.
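+
+One common way to pose such a fit is as a linear least-squares problem
+for the sphere center and radius. The NumPy sketch below illustrates the
+algebraic form of the problem; it is not necessarily the algorithm used
+by the utility itself::
+
+    import numpy as np
+
+    def fit_sphere(points):
+        """Algebraic least-squares sphere fit to an (N, 3) array."""
+        P = np.asarray(points, dtype=float)
+        A = np.hstack([2.0 * P, np.ones((len(P), 1))])
+        b = (P ** 2).sum(axis=1)
+        c, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
+        center = c[:3]
+        radius = np.sqrt(c[3] + center @ center)
+        return center, radius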
+
+Command line options
+--------------------
+
+mne_fit_sphere_to_surf accepts
+the following command-line options:
+
+``--bem  <*name*>``
+
+    A BEM file to use. The names of these files usually end with ``bem.fif`` or ``bem-sol.fif`` .
+
+``--surf  <*name*>``
+
+    A FreeSurfer surface file to read. This is an alternative to using
+    a surface from the BEM file.
+
+``--scalp``
+
+    Use the scalp surface instead of the inner skull surface in sphere
+    fitting. If the surface is specified with the ``--surf`` option,
+    this one is irrelevant.
+
+``--mritrans  <*name*>``
+
+    A file containing a transformation matrix between the MEG head coordinates
+    and MRI coordinates. With this option, the sphere origin will be
+    output in MEG head coordinates. Otherwise the output will be in MRI
+    coordinates.
+
+
+.. _mne_fix_mag_coil_types:
+
+mne_fix_mag_coil_types
+======================
+
+The purpose of mne_fix_mag_coil_types is
+to change coil type 3022 to 3024 in the MEG channel definition records
+in the data files specified on the command line.
+
+As shown in Tables 5.2 and 5.3, the Neuromag Vectorview systems
+can contain magnetometers with two different coil sizes (coil types
+3022 and 3023 vs. 3024). The systems incorporating coils of type
+3024 were introduced last. At some sites the data files still
+define the magnetometers to be of type 3022 to ensure compatibility
+with older versions of Neuromag software. In the MNE software as
+well as in the present version of Neuromag software coil type 3024
+is fully supported. Therefore, it is now safe to upgrade the data
+files to use the true coil type.
+
+If the ``--magnes`` option is specified, the 4D
+Magnes magnetometer coil type (4001) is changed to 4D Magnes gradiometer
+coil type (4002). Use this option always and *only
+if* your Magnes data comes from a system with axial gradiometers
+instead of magnetometers. The fif converter included with the Magnes
+system does not assign the gradiometer coil type correctly.
+
+.. note:: The effect of the difference between the coil sizes of magnetometer types 3022 and 3024 on the current estimates computed by the MNE software is very small. Therefore the use of mne_fix_mag_coil_types is not mandatory.
+
+
+.. _mne_fix_stim14:
+
+mne_fix_stim14
+==============
+
+Some earlier versions of the Neuromag acquisition software
+had a problem with the encoding of the eighth bit on the digital
+stimulus channel STI 014. This problem has now been fixed. Old data
+files can be fixed with mne_fix_stim14 ,
+which takes raw data file names as arguments. mne_fix_stim14 also
+changes the calibration of STI 014 to unity. If the encoding of
+STI 014 is already correct, running mne_fix_stim14 will
+not have any effect on the raw data.
+
+In newer Neuromag Vectorview systems with 16-bit digital
+inputs the upper two bytes of the samples may be incorrectly set
+when stimulus input 16 is used and the data are acquired in the
+32-bit mode. This problem can be fixed by running mne_fix_stim14 on
+a raw data file with the ``--32`` option:
+
+``mne_fix_stim14 --32``  <*raw data file*>
+
+In this case, the correction will be applied to the stimulus
+channels 'STI101' and 'STI201'.
+
+
+.. _mne_flash_bem:
+
+mne_flash_bem
+=============
+
+``--help``
+
+    Prints the usage information.
+
+``--usage``
+
+    Prints the usage information.
+
+``--noconvert``
+
+    Skip conversion of the original MRI data. The original data are
+    not needed, and the preparatory steps 1.-3. are
+    thus not required.
+
+``--noflash30``
+
+    The 30-degree flip angle data are not used.
+
+``--unwarp  <*type*>``
+
+    Run grad_unwarp with the ``--unwarp`` <*type*> option on each of the converted
+    data sets.
+
+
+.. _mne_insert_4D_comp:
+
+mne_insert_4D_comp
+==================
+
+Import Magnes WH3600 reference channel data from a text file.
+
+``--in <name>``
+
+    The name of the fif file containing the helmet data.
+
+``--ref <name>``
+
+    The name of the text file containing the reference channel data.
+
+``--out <name>``
+
+    The output fif file.
+
+
+.. _mne_kit2fiff:
+
+mne_kit2fiff
+============
+
+The utility mne_kit2fiff was
+created in collaboration with Alec Maranz and Asaf Bachrach to import
+their MEG data acquired with the 160-channel KIT MEG system into the
+MNE software.
+
+To import the data, the following input files are mandatory:
+
+- The Polhemus data file (elp file)
+  containing the locations of the fiducials and the head-position
+  indicator (HPI) coils. These data are usually given in the CTF/4D
+  head coordinate system. However, mne_kit2fiff does
+  not rely on this assumption. This file can be exported directly from
+  the KIT system.
+
+- A file containing the locations of the HPI coils in the MEG
+  device coordinate system. These data are used together with the elp file
+  to establish the coordinate transformation between the head and
+  device coordinate systems. This file can be produced easily by manually
+  editing one of the files exported by the KIT system.
+
+- A sensor data file (sns file)
+  containing the locations and orientations of the sensors. This file
+  can be exported directly from the KIT system.
+
+.. note:: The output fif file will use the Neuromag head coordinate system convention, see :ref:`BJEBIBAI`. A coordinate transformation between the CTF/4D head coordinates and the Neuromag head coordinates is included. This transformation can be read with MNE Matlab Toolbox routines, see :ref:`ch_matlab`.
+
+The following input files are optional:
+
+- A head shape data file (hsp file)
+  containing locations of additional points from the head surface.
+  These points must be given in the same coordinate system as that
+  used for the elp file and the
+  fiducial locations must be within 1 mm from those in the elp file.
+
+- A raw data file containing the raw data values, sample by
+  sample, as text. If this file is not specified, the output fif file
+  will only contain the measurement info block.
+
+By default mne_kit2fiff includes
+the first 157 channels, assumed to be the MEG channels, in the output
+file. The compensation channel data are not converted by default
+but can be added, together with other channels, with the ``--add`` option.
+The channels from 160 onwards are designated as miscellaneous input
+channels (MISC 001, MISC 002, etc.). The channel names and types
+of these channels can be afterwards changed with the :ref:`mne_rename_channels`
+utility. In addition, it is possible to synthesize
+the digital trigger channel (STI 014) from available analog
+trigger channel data, see the ``--stim`` option, below.
+The synthesized trigger channel data value at sample :math:`k` will
+be:
+
+.. math::    s(k) = \sum_{p = 1}^n {t_p(k) 2^{p - 1}}\ ,
+
+where :math:`t_p(k)` are the thresholded values derived
+from the input channel data :math:`d_p(k)`:
+
+.. math::    t_p(k) = \Bigg\{ \begin{array}{l}
+		 0 \text{  if  } d_p(k) \leq t\\
+		 1 \text{  if  } d_p(k) > t
+	     \end{array}\ .
+
+The threshold value :math:`t` can
+be adjusted with the ``--stimthresh`` option, see below.
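+
+In NumPy terms, the synthesis described by the two equations above can
+be sketched as follows (illustrative only)::
+
+    import numpy as np
+
+    def synthesize_trigger(analog, t=1.0):
+        """Combine thresholded analog channels into one trigger channel.
+
+        analog -- array of shape (n_channels, n_samples); row p - 1
+                  corresponds to channel p in the --stim list
+        t      -- threshold (cf. --stimthresh)
+        """
+        bits = (analog > t).astype(int)          # t_p(k)
+        weights = 2 ** np.arange(len(bits))      # 2^(p - 1)
+        return weights @ bits                    # s(k)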
+
+mne_kit2fiff accepts the following command-line options:
+
+``--elp <*filename*>``
+
+    The name of the file containing the locations of the fiducials and
+    the HPI coils. This option is mandatory.
+
+``--hsp <*filename*>``
+
+    The name of the file containing the locations of the fiducials and additional
+    points on the head surface. This file is optional.
+
+``--sns <*filename*>``
+
+    The name of file containing the sensor locations and orientations. This
+    option is mandatory.
+
+``--hpi <*filename*>``
+
+    The name of a text file containing the locations of the HPI coils
+    in the MEG device coordinate frame, given in millimeters. The order of
+    the coils in this file does not have to be the same as that in the elp file.
+    This option is mandatory.
+
+``--raw <*filename*>``
+
+    Specifies the name of the raw data file. If this file is not specified, the
+    output fif file will only contain the measurement info block.
+
+``--sfreq <*value/Hz*>``
+
+    The sampling frequency of the data. If this option is not specified, the
+    sampling frequency defaults to 1000 Hz.
+
+``--lowpass <*value/Hz*>``
+
+    The lowpass filter corner frequency used in the data acquisition.
+    If not specified, this value defaults to 200 Hz.
+
+``--highpass <*value/Hz*>``
+
+    The highpass filter corner frequency used in the data acquisition.
+    If not specified, this value defaults to 0 Hz (DC recording).
+
+``--out <*filename*>``
+
+    Specifies the name of the output fif format data file. If this file
+    is not specified, no output is produced but the elp, hpi,
+    and hsp files are processed normally.
+
+``--stim <*chs*>``
+
+    Specifies a colon-separated list of numbers of channels to be used
+    to synthesize a digital trigger channel. These numbers refer to
+    the scanning order channels as listed in the sns file,
+    starting from one. The digital trigger channel will be the last
+    channel in the file. If this option is absent, the output file will
+    not contain a trigger channel.
+
+``--stimthresh <*value*>``
+
+    The threshold value used when synthesizing the digital trigger channel,
+    see above. Defaults to 1.0.
+
+``--add <*chs*>``
+
+    Specifies a colon-separated list of numbers of channels to include between
+    the 157 default MEG channels and the digital trigger channel. These
+    numbers refer to the scanning order channels as listed in the sns file,
+    starting from one.
+
+.. note:: The mne_kit2fiff utility has not been extensively tested yet.
+
+
+.. _mne_list_bem:
+
+mne_list_bem
+============
+
+The utility mne_list_bem outputs
+the BEM meshes in text format. The default output data contains
+the *x*, *y*, and *z* coordinates
+of the vertices, listed in millimeters, one vertex per line.
+
+The command-line options are:
+
+``--bem <*name*>``
+
+    The BEM file to be listed. The file name normally ends with ``-bem.fif`` or ``-bem-sol.fif``.
+
+``--out <*name*>``
+
+    The output file name.
+
+``--id <*number*>``
+
+    Identify the surface to be listed. The surfaces are numbered starting with
+    the innermost surface. Thus, for a three-layer model the surface numbers
+    are: 1 = inner skull, 3 = outer skull, 4 = scalp.
+    The default value is 4.
+
+``--gdipoli``
+
+    List the surfaces in the format required by Thom Oostendorp's
+    gdipoli program. This is also the default input format for mne_surf2bem.
+
+``--meters``
+
+    List the surface coordinates in meters instead of millimeters.
+
+``--surf``
+
+    Write the output in the binary FreeSurfer format.
+
+``--xfit``
+
+    Write a file compatible with xfit. This is the same effect as using
+    the options ``--gdipoli`` and ``--meters`` together.
+
+
+.. _mne_list_coil_def:
+
+mne_list_coil_def
+=================
+
+List available coil definitions.
+
+``--in <def>``
+
+    Validate a coil definition file.
+
+``--out <name>``
+
+    List the coil definitions to this file (default: stdout).
+
+``--type <type>``
+
+    Coil type to list.
+
+
+.. _mne_list_proj:
+
+mne_list_proj
+=============
+
+``--in <name>``
+
+    Input file.
+
+``--ascin <name>``
+
+    Input file.
+
+``--exclude <name>``
+
+    Exclude these channels from the projection (set entries to zero).
+
+``--chs <name>``
+
+    Specify a file which contains a channel selection for the output (useful for graph)
+
+``--asc <name>``
+
+    Text output.
+
+``--fif <name>``
+
+    Fif output.
+
+``--lisp <name>``
+
+    Lisp output.
+
+
+.. _mne_list_source_space:
+
+mne_list_source_space
+=====================
+
+The utility mne_list_source_space outputs
+the source space information into text files suitable for loading
+into the Neuromag MRIlab software.
+
+``--src <*name*>``
+
+    The source space to be listed. This can be either the output from mne_make_source_space
+    (`*src.fif`), output from the forward calculation (`*fwd.fif`), or
+    the output from the inverse operator decomposition (`*inv.fif`).
+
+``--mri <*name*>``
+
+    A file containing the transformation between the head and MRI coordinates
+    is specified with this option. This file can be either a Neuromag
+    MRI description file, the output from the forward calculation (`*fwd.fif`),
+    or the output from the inverse operator decomposition (`*inv.fif`).
+    If this file is included, the output will be in head coordinates.
+    Otherwise the source space will be listed in MRI coordinates.
+
+``--dip <*name*>``
+
+    Specifies the 'stem' for the Neuromag text format
+    dipole files to be output. Two files will be produced: <*stem*>-lh.dip
+    and <*stem*>-rh.dip. These correspond
+    to the left and right hemisphere part of the source space, respectively.
+    This source space data can be imported to MRIlab through the File/Import/Dipoles menu
+    item.
+
+``--pnt <*name*>``
+
+    Specifies the 'stem' for Neuromag text format
+    point files to be output. Two files will be produced: <*stem*>-lh.pnt
+    and <*stem*>-rh.pnt. These correspond
+    to the left and right hemisphere part of the source space, respectively.
+    This source space data can be imported to MRIlab through the File/Import/Strings menu
+    item.
+
+``--exclude <*name*>``
+
+    Exclude the source space points defined by the given FreeSurfer 'label' file
+    from the output. The name of the file should end with ``-lh.label``
+    if it refers to the left hemisphere and with ``-rh.label`` if
+    it lists points in the right hemisphere, respectively.
+
+``--include <*name*>``
+
+    Include only the source space points defined by the given FreeSurfer 'label' file
+    in the output. The file naming convention is the same as described
+    above under the ``--exclude`` option. The 'include' labels are
+    processed before the 'exclude' labels.
+
+``--all``
+
+    Include all nodes in the output files instead of only those active
+    in the source space. Note that the output files will be huge if
+    this option is active.
+
+
+.. _mne_list_versions:
+
+mne_list_versions
+=================
+
+The utility mne_list_versions lists
+version numbers and compilation dates of all software modules that
+provide this information. This administration utility is located
+in ``$MNE_ROOT/bin/admin``. The output from mne_list_versions or
+the output of individual modules with the ``--version`` option
+is useful when reporting bugs to the developers of the MNE software.
+
+
+.. _mne_make_cor_set:
+
+mne_make_cor_set
+================
+
+The utility mne_make_cor_set creates
+a fif format MRI description
+file optionally including the MRI data using FreeSurfer MRI volume
+data as input. The command-line options are:
+
+``--dir <*directory*>``
+
+    Specifies a directory containing the MRI volume in COR format. Any
+    previous ``--mgh`` options are cancelled when this option
+    is encountered.
+
+``--withdata``
+
+    Include the pixel data to the output file. This option is implied
+    with the ``--mgh`` option.
+
+``--mgh <*name*>``
+
+    An MRI volume file in mgh or mgz format.
+    The ``--withdata`` option is implied with this type of
+    input. Furthermore, the :math:`T_3` transformation,
+    the Talairach transformation :math:`T_4` from
+    the talairach.xfm file referred to in the MRI volume, and the
+    fixed transforms :math:`T_-` and :math:`T_+` will be
+    added to the output file. For the definition of the coordinate transformations,
+    see :ref:`CHDEDFIB`.
+
+``--talairach <*name*>``
+
+    Take the Talairach transform from this file instead of the one specified
+    in mgh/mgz files.
+
+``--out <*name*>``
+
+    Specifies the output file, which is a fif-format MRI description
+    file.
+
+
+.. _mne_make_derivations:
+
+mne_make_derivations
+====================
+
+Purpose
+-------
+
+In mne_browse_raw, channel
+derivations are defined as linear combinations of real channels
+existing in the data files. The utility mne_make_derivations reads
+derivation data from a suitably formatted text file and produces
+a fif file containing the weights of derived channels as a sparse
+matrix. Two input file formats are accepted:
+
+- A file containing arithmetic expressions
+  defining the derivations and
+
+- A file containing a matrix which specifies the weights of
+  the channels in each derivation.
+
+Both of these formats are described below.
+
+Command-line options
+--------------------
+
+mne_make_derivations recognizes
+the following command-line options:
+
+``--in  <*name*>``
+
+    Specifies the name of a text file containing the derivation data
+    as arithmetic expressions, see below.
+
+``--inmat  <*name*>``
+
+    Specifies the name of a text file containing the derivation data
+    as a matrix of weights, see below.
+
+``--trans``
+
+    Indicates that the file specified with the ``--inmat`` option
+    contains a transpose of the derivation matrix.
+
+``--thresh  <*value*>``
+
+    Specifies the threshold between values to be considered zero and non-zero
+    in the input file specified with the ``--inmat`` option.
+    The default threshold is :math:`10^{-6}`.
+
+``--out  <*name*>``
+
+    Specifies the output fif file to contain the derivation data. The recommended
+    name of the derivation file has the format <*name*> ``-deriv.fif``.
+
+``--list  <*name*>``
+
+    List the contents of a derivation file to standard output. If this
+    option is missing and ``--out`` is specified, the content
+    of the output file will be listed once it is complete. If neither ``--list`` nor ``--out`` is present,
+    and ``--in`` or ``--inmat`` is specified, the
+    interpreted contents of the input file are listed.
+
+Derivation file formats
+-----------------------
+
+All lines in the input files starting with the pound sign
+(#) are considered to be comments. The format of a derivation in
+an arithmetic input file is:
+
+.. math::    \langle name \rangle = [\langle w_1 \rangle *] \langle name_1 \rangle + [\langle w_2 \rangle *] \langle name_2 \rangle \dotso
+
+where :math:`name` is the
+name of the derived channel, :math:`name_k` are
+the names of the channels comprising the derivation, and :math:`w_k` are
+their weights. Note that spaces are necessary between the items.
+Channel names containing spaces must be put in quotes. For example,
+
+``EEG-diff = "EEG 003" - "EEG 002"``
+
+defines a channel ``EEG-diff`` which is a difference
+between ``EEG 003`` and ``EEG 002`` . Similarly,
+
+``EEG-der = 3 * "EEG 010" - 2 * "EEG 002"``
+
+defines a channel which is three times ``EEG 010`` minus
+two times ``EEG 002`` .
+
+The format of a matrix derivation file is::
+
+    <nrow> <ncol>
+    <names of the input channels>
+    <name_1> <weights>
+    ...
+
+The combination of the two arithmetic examples above can thus be
+represented as::
+
+    2 3
+    "EEG 002" "EEG 003" "EEG 010"
+    EEG-diff -1 1 0
+    EEG-der  -2 0 3
+
+Before a derivation is accepted for use by mne_browse_raw,
+the following criteria have to be met:
+
+- All channels to be combined into a single
+  derivation must have identical units of measure.
+
+- All channels in a single derivation have to be of the same
+  kind, *e.g.*, MEG channels or EEG channels.
+
+- All channels specified in a derivation have to be present
+  in the currently loaded data set.
+
+The validity check is done when a derivation file is loaded
+into mne_browse_raw , see :ref:`CACFHAFH`.
+
+.. note:: You might consider renaming the EEG channels with descriptive labels related to the standard 10-20 system using the :ref:`mne_rename_channels` utility. This allows you to use standard EEG channel names in the derivations you define as well as in the channel selection files used in mne_browse_raw, see :ref:`CACCJEJD`.
+
+
+.. _mne_make_eeg_layout:
+
+mne_make_eeg_layout
+===================
+
+Purpose
+-------
+
+Both MNE software (mne_analyze and mne_browse_raw)
+and Neuromag software (xplotter and xfit)
+employ text layout files to create topographical displays of MEG
+and EEG data. While the MEG channel layout is fixed, the EEG layout
+varies from experiment to experiment, depending on the number of
+electrodes used and the electrode cap configuration. The utility mne_make_eeg_layout was
+created to produce custom EEG layout files based on the EEG electrode
+location information included in the channel description records.
+
+mne_make_eeg_layout uses
+azimuthal equidistant projection to map the EEG channel locations
+onto a plane. The mapping consists of the following steps:
+
+- A sphere is fitted to the electrode
+  locations and the locations are translated by the location of the
+  origin of the best-fitting sphere.
+
+- The spherical coordinates (:math:`r_k`, :math:`\theta_k`, and :math:`\phi_k`)
+  corresponding to each translated electrode location are computed.
+
+- The projected locations :math:`u_k = R \theta_k \cos{\phi_k}` and :math:`v_k = R \theta_k \sin{\phi_k}` are
+  computed. By default, :math:`R = 20/(\pi/2)`, *i.e.*, at
+  the equator (:math:`\theta = \pi/2`) the multiplier is
+  20. This projection radius can be adjusted with the ``--prad`` option.
+  Increasing or decreasing :math:`R` makes
+  the spacing between the channel viewports larger or smaller, respectively.
+
+- A viewport with width 5 and height 4 is placed centered at
+  the projected location. The width and height of the viewport can
+  be adjusted with the ``--width`` and ``--height`` options. A sketch
+  of the projection is given below.
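+
+A minimal NumPy sketch of steps 2 and 3 above (the sphere fit of step 1
+is omitted; ``pos`` and ``origin`` are hypothetical inputs)::
+
+    import numpy as np
+
+    def project_electrodes(pos, origin, R=20.0 / (np.pi / 2.0)):
+        """Azimuthal equidistant projection of 3D electrode
+        locations onto a plane (simplified sketch)."""
+        xyz = np.asarray(pos) - origin          # translate to sphere center
+        r = np.linalg.norm(xyz, axis=1)
+        theta = np.arccos(xyz[:, 2] / r)        # polar angle from +z
+        phi = np.arctan2(xyz[:, 1], xyz[:, 0])  # azimuth
+        return R * theta * np.cos(phi), R * theta * np.sin(phi)  # u_k, v_k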
+
+The command-line options are:
+
+``--lout  <*name*>``
+
+    Specifies the name of the layout file to be output.
+
+``--nofit``
+
+    Do not fit a sphere to the electrode locations but use a standard sphere
+    center (:math:`x = y = 0`, and :math:`z = 40` mm) instead.
+
+``--prad  <*value*>``
+
+    Specifies a non-standard projection radius :math:`R`,
+    see above.
+
+``--width  <*value*>``
+
+    Specifies the width of the viewports. Default value = 5.
+
+``--height  <*value*>``
+
+    Specifies the height of the viewports. Default value = 4.
+
+
+.. _mne_make_morph_maps:
+
+mne_make_morph_maps
+===================
+Prepare the mapping data for subject-to-subject morphing.
+
+``--redo``
+
+    Recompute the morphing maps even if they already exist.
+
+``--from <subject>``
+
+    Compute morphing maps from this subject.
+
+``--to <subject>``
+
+    Compute morphing maps to this subject.
+
+``--all``
+
+    Do all combinations. If this is used without either ``--from`` or ``--to`` options,
+    morphing maps for all possible combinations are computed. If ``--from`` or ``--to`` is
+    present, only maps between the specified subject and all others
+    are computed.
+
+.. note:: Because all morphing map files contain maps in both directions, the choice of the ``--from`` and ``--to`` options only affects the naming of the morphing map files to be produced. mne_make_morph_maps creates the directory ``$SUBJECTS_DIR/morph-maps`` if necessary.
+
+
+.. _mne_make_uniform_stc:
+
+mne_make_uniform_stc
+====================
+
+The output will have a time range from -100 to 300 ms.
+There will be one cycle of a 5-Hz sine wave, with peaks at 50 and 150 ms.
+
+``--src <name>``
+
+    Source space to use.
+
+``--stc <name>``
+
+    Stc file to produce.
+
+``--maxval <value>``
+
+    Maximum value (at 50 ms, default 10).
+
+``--all``
+
+    Include all points in the output files.
+
+
+.. _mne_mark_bad_channels:
+
+mne_mark_bad_channels
+=====================
+
+This utility adds or replaces information about unusable
+(bad) channels. The command line options are:
+
+``--bad  <*filename*>``
+
+    Specify a text file containing the names of the bad channels, one channel
+    name per line. The names of the channels in this file must match
+    those in the data file exactly. If this option is missing, the bad channel
+    information is cleared.
+
+``<*data file name*>``
+
+    The remaining arguments are taken as data file names to be modified.
+
+
+.. _mne_morph_labels:
+
+mne_morph_labels
+================
+Morph label files from one brain to another.
+
+``--from <*subject*>``
+
+    Name of the subject for which the labels were originally defined.
+
+``--to <*subject*>``
+
+    Name of the subject for which the morphed labels should be created.
+
+``--labeldir <*directory*>``
+
+    A directory containing the labels to morph.
+
+``--prefix <prefix>``
+
+    Adds <*prefix*> at the beginning
+    of the output label names. A dash will be inserted between <*prefix*> and
+    the rest of the name.
+
+``--smooth <number>``
+
+    Apply smoothing with the indicated number of iteration steps (see :ref:`CHDEBAHH`) to the labels before morphing them. This is
+    advisable because otherwise the resulting labels may have small
+    holes in them since the morphing map is not a bijection. By default,
+    two smoothing steps are taken.
+
+As the labels are morphed, a directory with the name of the
+subject specified with the ``--to`` option is created under
+the directory specified with ``--labeldir`` to hold the
+morphed labels.
+
+
+.. _mne_organize_dicom:
+
+mne_organize_dicom
+==================
+
+
+.. _mne_prepare_bem_model:
+
+mne_prepare_bem_model
+=====================
+
+``--bem <*name*>``
+
+    Specify the name of the file containing the triangulations of the BEM
+    surfaces and the conductivities of the compartments. The standard
+    ending for this file is ``-bem.fif`` and it is produced
+    either with the utility :ref:`mne_surf2bem` (:ref:`BEHCACCJ`) or the
+    convenience script :ref:`mne_setup_forward_model`,
+    see :ref:`CIHDBFEG`.
+
+``--sol <*name*>``
+
+    Specify the name of the file containing the triangulation and conductivity
+    information together with the BEM geometry matrix computed by mne_prepare_bem_model.
+    The standard ending for this file is ``-bem-sol.fif``.
+
+``--method <*approximation method*>``
+
+    Select the BEM approach. If <*approximation method*> is ``constant``,
+    the BEM basis functions are constant functions on each triangle
+    and the collocation points are the midpoints of the triangles. With ``linear``,
+    the BEM basis functions are linear functions on each triangle and
+    the collocation points are the vertices of the triangulation. This
+    is the preferred method to use. The accuracy will be the same or
+    better than in the constant collocation approach with about half
+    the number of unknowns in the BEM equations.
+
+
+.. _mne_process_stc:
+
+mne_process_stc
+===============
+
+``--stc <name>``
+
+    Specify the stc file to process.
+
+``--out <name>``
+
+    Specify an stc output file name.
+
+``--outasc <name>``
+
+    Specify a text output file name.
+
+``--scaleto <scale>``
+
+    Scale the data so that the maximum is this value.
+
+``--scaleby <scale>``
+
+    Multiply the values by this factor.
+
+
+.. _mne_raw2mat:
+
+mne_raw2mat
+===========
+
+The utility mne_raw2mat converts
+all or selected channels from a raw data file to a Matlab mat file.
+In addition, this utility can provide information about the raw
+data file so that the raw data can be read directly from the original
+fif file using Matlab file I/O routines.
+
+.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without a need for conversion to mat file format first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_raw2mat, which creates large files occupying disk space unnecessarily.
+
+Command-line options
+--------------------
+
+mne_raw2mat accepts the
+following command-line options:
+
+``--raw <*name*>``
+
+    Specifies the name of the raw data fif file to convert.
+
+``--mat <*name*>``
+
+    Specifies the name of the destination Matlab file.
+
+``--info``
+
+    With this option present, only information about the raw data file
+    is included. The raw data itself is omitted.
+
+``--sel <*name*>``
+
+    Specifies a text file which contains the names of the channels to include
+    in the output file, one channel name per line. If the ``--info`` option
+    is specified, ``--sel`` does not have any effect.
+
+``--tag <*tag*>``
+
+    By default, all Matlab variables included in the output file start
+    with ``mne_``. This option changes the prefix to <*tag*>_.
+
+Matlab data structures
+----------------------
+
+The Matlab files output by mne_raw2mat can
+contain two data structures, <*tag*>_raw and <*tag*>_raw_info.
+If the ``--info`` option is specified, the file contains the
+latter structure only.
+
+The <*tag*>_raw structure
+contains only one field, ``data``, which
+is a matrix containing the raw data. Each row of this matrix constitutes
+the data from one channel in the original file. The data type of
+this matrix is the same as that of the original data (2-byte signed integer,
+4-byte signed integer, or single-precision float).
+
+The fields of the <*tag*>_raw_info structure
+are listed in :ref:`BEHFDCIH`. Further explanation of the bufs field
+is provided in :ref:`BEHJEIHJ`.
+
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
+.. _BEHFDCIH:
+.. table:: The fields of the raw data info structure.
+
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | Variable              | Size            | Description                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | orig_file             | string          | The name of the original fif file specified with the       |
+    |                       |                 | ``--raw`` option.                                          |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nchan                 | 1               | Number of channels.                                        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | nsamp                 | 1               | Total number of samples                                    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | bufs                  | nbuf x 4        | This field is present if ``--info`` option was specified on|
+    |                       |                 | the command line. For details, see :ref:`BEHJEIHJ`.        |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | sfreq                 | 1               | The sampling frequency in Hz.                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | highpass              | 1               | Highpass filter frequency (Hz)                             |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_names              | nchan (string)  | String array containing the names of the channels included |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_types              | nchan x 2       | The first column lists the types of the channels (1 = MEG, |
+    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
+    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
+    |                       |                 | this value equals one.                                     |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
+    |                       |                 | The unit of the data is listed in the first column         |
+    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
+    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
+    |                       |                 | values specify the origin of the sensor coordinate system  |
+    |                       |                 | or the location of the electrode. For MEG channels, the    |
+    |                       |                 | following nine numbers specify the *x*, *y*, and           |
+    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
+    |                       |                 | For EEG electrodes the first vector after the electrode    |
+    |                       |                 | location specifies the location of the reference electrode.|
+    |                       |                 | If the reference is not specified this value is all zeroes.|
+    |                       |                 | The remaining unit vectors are irrelevant for EEG          |
+    |                       |                 | electrodes.                                                |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | ch_cals               | nchan x 2       | The raw data output by mne_raw2mat is uncalibrated.        |
+    |                       |                 | The first column is the range member of the fiff data      |
+    |                       |                 | structures while the second is the cal member. To get      |
+    |                       |                 | calibrated data values in the units given in ch_units from |
+    |                       |                 | the raw data, the data must be multiplied with the product |
+    |                       |                 | of range and cal .                                         |
+    +-----------------------+-----------------+------------------------------------------------------------+
+    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
+    |                       |                 | coordinates to the MEG head coordinates.                   |
+    +-----------------------+-----------------+------------------------------------------------------------+
+
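+As a sketch, the calibration could be applied in Python with SciPy
+(assuming the default ``mne_`` variable prefix and that the raw data
+were included in the file; the file name and field access pattern are
+hypothetical)::
+
+    import numpy as np
+    from scipy.io import loadmat
+
+    m = loadmat('data-raw.mat')
+    data = m['mne_raw']['data'][0, 0].astype(float)   # uncalibrated data
+    cals = m['mne_raw_info']['ch_cals'][0, 0]         # nchan x 2: range, cal
+    data *= (cals[:, 0] * cals[:, 1])[:, np.newaxis]  # calibrated values
+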
+
+.. tabularcolumns:: |p{0.1\linewidth}|p{0.6\linewidth}|
+.. _BEHJEIHJ:
+.. table:: The bufs member of the raw data info structure.
+
+    +-----------------------+-------------------------------------------------------------------------+
+    | Column                | Contents                                                                |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 1                     | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte signed   |
+    |                       | integer, 4 = single-precision float). All data in the fif file are      |
+    |                       | written in the big-endian byte order. The raw data are stored sample by |
+    |                       | sample.                                                                 |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 2                     | Byte location of this buffer in the original fif file.                  |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 3                     | First sample of this buffer. Since raw data storing can be switched on  |
+    |                       | and off during the acquisition, there might be gaps between the end of  |
+    |                       | one buffer and the beginning of the next.                               |
+    +-----------------------+-------------------------------------------------------------------------+
+    | 4                     | Number of samples in the buffer.                                        |
+    +-----------------------+-------------------------------------------------------------------------+
+
+
+.. _mne_rename_channels:
+
+mne_rename_channels
+===================
+
+Sometimes it is necessary to change the names or types of channels
+in MEG/EEG data files. Such situations include:
+
+- Designating an EEG channel as an EOG channel.
+  For example, the EOG channels are not recognized as such in the
+  fif files converted from CTF data files.
+
+- Changing the name of the digital trigger channel of interest
+  to STI 014 so that mne_browse_raw and mne_process_raw will
+  recognize the correct channel without the need to specify the ``--digtrig``
+  option or the MNE_TRIGGER_CH_NAME environment variable every time a
+  data file is loaded.
+
+The utility mne_rename_channels was
+designed to meet the above needs. It recognizes the following command-line
+options:
+
+``--fif  <*name*>``
+
+    Specifies the name of the data file to modify.
+
+``--alias  <*name*>``
+
+    Specifies the text file which contains the modifications to be applied,
+    see below.
+
+``--revert``
+
+    Reverse the roles of old and new channel names in the alias file.
+
+Each line in the alias file contains the old name and new
+name for a channel, separated by a colon. The old name is a name
+of one of the channels presently in the file and the new name is
+the name to be assigned to it. The old name must match an existing
+channel name in the file exactly. The new name may be followed by
+another colon and a number which is the channel type to be assigned
+to this channel. The channel type options are listed below.
+
+.. table:: Channel types.
+
+    ==============  ======================
+    Channel type    Corresponding number
+    ==============  ======================
+    MEG             1
+    MCG             201
+    EEG             2
+    EOG             202
+    EMG             302
+    ECG             402
+    MISC            502
+    STIM            3
+    ==============  ======================
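+
+For example, a hypothetical alias file that renames channel EEG 063 to
+EOG 063 and assigns it the EOG type (202), and renames an analog trigger
+channel STI 201 to STI 014, could contain::
+
+    EEG 063:EOG 063:202
+    STI 201:STI 014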
+
+.. warning:: Do not attempt to designate MEG channels to EEG channels or vice versa. This may result in strange errors during source estimation.
+
+.. note:: You might consider renaming the EEG channels with descriptive labels related to the standard 10-20 system. This allows you to use standard EEG channel names when defining derivations, see :ref:`mne_make_derivations` and :ref:`CACFHAFH`, as well as in the channel selection files used in mne_browse_raw, see :ref:`CACCJEJD`.
+
+
+.. _mne_sensitivity_map:
+
+mne_sensitivity_map
+===================
+
+Purpose
+-------
+
+mne_sensitivity_map computes
+the size of the columns of the forward operator and outputs the
+result in w files.
+
+Command line options
+--------------------
+
+mne_sensitivity_map accepts
+the following command-line options:
+
+``--fwd  <*name*>``
+
+    Specifies a forward solution file to analyze. By default the MEG
+    forward solution is considered.
+
+``--proj  <*name*>``
+
+    Specifies a file containing an SSP operator to be applied. If necessary,
+    multiple ``--proj`` options can be specified. For map types 1 - 4 (see
+    below), SSP is applied to the forward model data. For map types
+    5 and 6, the effects of SSP are evaluated against the unmodified
+    forward model.
+
+``--eeg``
+
+    Use the EEG forward solution instead of the MEG one. It does not make
+    sense to consider a combination because of the different units of
+    measure. For the same reason, gradiometers and magnetometers have
+    to be handled separately, see ``--mag`` option below. By
+    default MEG gradiometers are included.
+
+``--mag``
+
+    Include MEG magnetometers instead of gradiometers
+
+``--w  <*name*>``
+
+    Specifies the stem of the output w files. To obtain the final output file
+    names, ``-lh.w`` and ``-rh.w`` are appended for
+    the left and right hemisphere, respectively.
+
+``--smooth  <*number*>``
+
+    Specifies the number of smooth steps to apply to the resulting w files.
+    Default: no smoothing.
+
+``--map  <*number*>``
+
+    Select the type of a sensitivity map to compute. At present, valid numbers
+    are 1 - 6. For details, see :ref:`CHDCDJIJ`, below.
+
+.. _CHDCDJIJ:
+
+Available sensitivity maps
+--------------------------
+
+In the following, let
+
+.. math::    G_k = [g_{xk} g_{yk} g_{zk}]
+
+denote the three consecutive columns of the gain matrix :math:`G` corresponding to
+the fields of three orthogonal dipoles at source space location :math:`k`.
+Further, let us assume that the source coordinate system has been
+selected so that the :math:`z`-axis points
+in the cortical normal direction and the :math:`xy` plane
+is thus the tangent plane of the cortex at the source space location :math:`k`.
+Next, compute the SVD
+
+.. math::    G_k = U_k \Lambda_k V_k^T
+
+and let :math:`g_{1k} = u_{1k} \lambda_{1k}`, where :math:`\lambda_{1k}` and :math:`u_{1k}` are
+the largest singular value and the corresponding left singular vector
+of :math:`G_k`, respectively. It is easy to see
+that :math:`g_{1k}` has the largest power
+among the signal distributions produced by unit dipoles at source
+space location :math:`k`.
+
+Furthermore, assume that the columns of the orthogonal matrix :math:`U_P` (:math:`U_P^T U_P = I`) contain
+the orthogonal basis of the noise subspace corresponding to the signal
+space projection (SSP) operator :math:`P` specified
+with one or more ``--proj`` options so that :math:`P = I - U_P U_P^T`.
+For more information on SSP, see :ref:`CACCHABI`.
+
+With these definitions, the map selections defined with the ``--map`` option correspond
+to the following:
+
+``--map 1``
+
+    Compute :math:`\sqrt{g_{1k}^T g_{1k}} = \lambda_{1k}` at each source space point.
+    Normalize the result so that the maximum value equals one.
+
+``--map 2``
+
+    Compute :math:`\sqrt{g_z^T g_z}` at each source space point.
+    Normalize the result so that the maximum value equals one. This
+    is the amplitude of the signals produced by unit dipoles normal
+    to the cortical surface.
+
+``--map 3``
+
+    Compute :math:`\sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
+
+``--map 4``
+
+    Compute :math:`1 - \sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
+    This could be called the *radiality index*.
+
+``--map 5``
+
+    Compute the subspace correlation between :math:`g_z` and :math:`U_P`: :math:`\text{subcorr}^2(g_z , U_P) = (g_z^T U_P U_P^T g_z)/(g_z^T g_z)`.
+    This index equals zero, if :math:`g_z` is
+    orthogonal to :math:`U_P` and one if :math:`g_z` lies
+    in the subspace defined by :math:`U_P`. This
+    map shows how close the field pattern of a dipole oriented perpendicular
+    to the cortex at each cortical location is to the subspace removed
+    by the SSP.
+
+``--map 6``
+
+    Compute :math:`\sqrt{g_z^T P g_z / g_z^T g_z}`, which is the fraction
+    of the field pattern of a dipole oriented perpendicular to the cortex
+    at each cortical location remaining after the SSP is applied.
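+
+As an illustration, a NumPy sketch of maps 1, 3, and 4 for a single
+source location (``Gk`` is a hypothetical n_channels x 3 gain submatrix
+with the cortical normal direction as its third column)::
+
+    import numpy as np
+
+    def sensitivity_maps(Gk):
+        lambda1 = np.linalg.svd(Gk, compute_uv=False)[0]  # largest singular value
+        gz = Gk[:, 2]                                     # normal-direction column
+        map3 = np.sqrt(gz @ gz) / lambda1                 # map 3
+        return lambda1, map3, 1.0 - map3                  # maps 1, 3, and 4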
+
+
+.. _mne_sensor_locations:
+
+mne_sensor_locations
+====================
+
+``--meas <name>``
+
+    Measurement file.
+
+``--magonly``
+
+    Magnetometers only.
+
+``--ell``
+
+    Output sensor ellipsoids for MRIlab.
+
+``--dir``
+
+    Output direction info as well.
+
+``--dev``
+
+    Output in MEG device coordinates.
+
+``--out <name>``
+
+    Name of the output file.
+
+
+.. _mne_show_fiff:
+
+mne_show_fiff
+=============
+
+Using the utility mne_show_fiff it
+is possible to display information about the contents of a fif file
+to the standard output. The command line options for mne_show_fiff are:
+
+``--in  <*name*>``
+
+    Specifies the fif file whose contents will be listed.
+
+``--verbose``
+
+    Produce a verbose output. The data of most tags is included in the output.
+    This excludes matrices and vectors. Only the first 80 characters
+    of strings are listed unless the ``--long`` option is present.
+
+``--blocks``
+
+    Only list the blocks (the tree structure) of the file. The tags
+    within each block are not listed.
+
+``--indent  <*number*>``
+
+    Number of spaces for indentation for each deeper level in the tree structure
+    of the fif files. The default indentation is 3 spaces in terse and
+    no spaces in verbose listing mode.
+
+``--long``
+
+    List all data from string tags instead of the first 80 characters.
+    This option has no effect unless the ``--verbose`` option
+    is also present.
+
+``--tag  <*number*>``
+
+    List only tags of this kind. Multiple ``--tag`` options
+    can be specified to list several different kinds of data.
+
+mne_show_fiff reads the
+explanations of tag kinds, block kinds, and units from ``$MNE_ROOT/share/mne/fiff_explanations.txt``.
+
+
+.. _mne_simu:
+
+mne_simu
+========
+
+Purpose
+-------
+
+The utility mne_simu creates
+simulated evoked response data for investigation of the properties
+of the inverse solutions. It computes MEG signals generated by dipoles
+normal to the cortical mantle at one or several ROIs defined with
+label files. Colored noise can be added to the signals.
+
+Command-line options
+--------------------
+
+mne_simu has the following
+command-line options:
+
+``--fwd  <*name*>``
+
+    Specify a forward solution file to employ in the simulation.
+
+``--label  <*name*>``
+
+    Specify a label file defining an ROI (see the second ``--label`` entry below).
+
+``--meg``
+
+    Provide MEG data in the output file.
+
+``--eeg``
+
+    Provide EEG data in the output file.
+
+``--out  <*name*>``
+
+    Specify the output file. By default, this will be an evoked data
+    file in the fif format.
+
+``--raw``
+
+    Output the data as a raw data fif file instead of an evoked one.
+
+``--mat``
+
+    Produce Matlab output of the simulated fields instead of the fif evoked
+    file.
+
+``--label  <*name*>``
+
+    Define an ROI. Several label files can be present. By default, the sources
+    in the labels will have :math:`\cos^2`-shaped non-overlapping
+    timecourses, see below.
+
+``--timecourse  <*name*>``
+
+    Specifies a text file which contains an expression for a source
+    time course, see :ref:`CHDCFIBH`. If no ``--timecourse`` options
+    are present, the standard source time courses described in :ref:`CHDFIIII` are used. Otherwise, the time course expressions
+    are read from the files specified. The time course expressions are
+    associated with the labels in the order they are specified. If the
+    number of expressions is smaller than the number of labels, the
+    last expression specified will be reused for the remaining labels.
+
+``--sfreq  <*freq/Hz*>``
+
+    Specifies the sampling frequency of the output data (default = 1000 Hz). This
+    option is used only with the time course files.
+
+``--tmin  <*time/ms*>``
+
+    Specifies the starting time of the data, used only with time course files
+    (default -200 ms).
+
+``--tmax  <*time/ms*>``
+
+    Specifies the ending time of the data, used only with time course files
+    (default 500 ms).
+
+``--seed  <*number*>``
+
+    Specifies the seed for random numbers. This seed is used both for adding
+    noise, see :ref:`CHDFBJIJ` and for random numbers in source waveform
+    expressions, see :ref:`CHDCFIBH`. If no seed is specified, the
+    current time in seconds since Epoch (January 1, 1970) is used.
+
+``--all``
+
+    Activate all sources on the cortical surface uniformly. This overrides the ``--label`` options.
+
+.. _CHDFBJIJ:
+
+Noise simulation
+----------------
+
+Noise is added to the signals if the ``--senscov`` and ``--nave`` options
+are present. If ``--nave`` is omitted the number of averages
+is set to :math:`L = 100`. The noise is computed
+by first generating vectors of Gaussian random numbers :math:`n(t)` with :math:`n_j(t) \sim N(0,1)`.
+Thereafter, the noise-covariance matrix :math:`C` is
+used to color the noise:
+
+.. math::    n_c(t) = \frac{1}{\sqrt{L}} U \Lambda n(t)\ ,
+
+where we have used the eigenvalue decomposition of the positive-definite
+covariance matrix:
+
+.. math::    C = U \Lambda^2 U^T\ .
+
+Note that it is assumed that the noise-covariance matrix
+is given for raw data, *i.e.*, for :math:`L = 1`.
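+
+A NumPy sketch of this coloring (``C`` is a hypothetical noise-covariance
+matrix)::
+
+    import numpy as np
+
+    def color_noise(C, nsamp, nave=100, seed=None):
+        """Colored noise with covariance C / nave (sketch)."""
+        rng = np.random.default_rng(seed)
+        eigval, U = np.linalg.eigh(C)            # C = U Lambda^2 U^T
+        lam = np.sqrt(np.clip(eigval, 0, None))  # Lambda
+        n = rng.standard_normal((C.shape[0], nsamp))
+        return (U * lam) @ n / np.sqrt(nave)     # U Lambda n(t) / sqrt(L)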
+
+.. _CHDFIIII:
+
+Simulated data
+--------------
+
+The default source waveform :math:`q_k` for
+the :math:`k^{th}` label is nonzero at times :math:`t_{kp} = (100(k - 1) + p)/f_s`, :math:`p = 0 \dotso 100` with:
+
+.. math::    q_k(t_{kp}) = Q_k \cos^2{(\frac{\pi p}{100} - \frac{\pi}{2})}\ ,
+
+i.e., the source waveforms are non-overlapping, 100-sample-wide
+:math:`\cos^2` pulses. The sampling frequency :math:`f_s = 600` Hz.
+The source amplitude :math:`Q_k` is determined
+so that the strength of each of the dipoles in a label will be :math:`50 \text{nAm}/N_k`.
+
+Let us denote the sums of the magnetic fields and electric
+potentials produced by the dipoles normal to the cortical mantle
+at label :math:`k` by :math:`x_k`. The simulated
+signals are then:
+
+.. math::    x(t_j) = \sum_{k = 1}^{N_s} {q_k(t_j) x_k + n_c(t_j)}\ ,
+
+where :math:`N_s` is the number of
+sources.
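+
+A short NumPy sketch of the default waveform for label :math:`k`
+(the amplitude ``Q`` is hypothetical)::
+
+    import numpy as np
+
+    def default_waveform(k, nsamp, Q=1e-9):
+        """cos^2 pulse for the k-th label (k = 1, 2, ...), nonzero
+        for samples 100*(k - 1) .. 100*k; nsamp must be at least
+        100*(k - 1) + 101 (sketch of the default waveform)."""
+        q = np.zeros(nsamp)
+        p = np.arange(101)
+        q[100 * (k - 1) + p] = Q * np.cos(np.pi * p / 100 - np.pi / 2) ** 2
+        return q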
+
+.. _CHDCFIBH:
+
+Source waveform expressions
+---------------------------
+
+The ``--timecourse`` option provides flexible possibilities
+to define the source waveforms in a functional form. The source
+waveform expression files consist of lines of the form:
+
+ <*variable*> ``=``  <*arithmetic expression*>
+
+Each file may contain multiple lines. At the end of the evaluation,
+only the values in the variable ``y`` (``q`` )
+are significant, see :ref:`CHDJBIEE`. They assume the role
+of :math:`q_k(t_j)` to compute the simulated signals
+as described in :ref:`CHDFIIII`, above.
+
+All expressions are case insensitive. The variables are vectors
+with the length equal to the number of samples in the responses,
+determined by the ``--tmin`` , ``--tmax`` , and ``--sfreq`` options.
+The available variables are listed in :ref:`CHDJBIEE`.
+
+.. _CHDJBIEE:
+
+.. table:: Available variable names in source waveform expressions.
+
+    ================  =======================================
+    Variable          Meaning
+    ================  =======================================
+    x                 time [s]
+    t                 current value of x in [ms]
+    y                 the source amplitude [Am]
+    q                 synonym for y
+    a , b , c , d     help variables, initialized to zeros
+    ================  =======================================
+
+The arithmetic expressions can use the usual arithmetic operations
+as well as the mathematical functions listed in :ref:`CHDJIBHA`.
+The arguments can be vectors or scalar numbers. In addition, standard
+relational operators (<, >, ==, <=, >=) and their textual
+equivalents (lt, gt, eq, le, ge) are available. Table :ref:`CHDDJEHH` gives some
+useful examples of source waveform expressions.
+
+.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
+.. _CHDJIBHA:
+.. table:: Mathematical functions available for source waveform expressions
+
+    +-----------------------+---------------------------------------------------------------+
+    | Function              | Description                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | abs(x)                | absolute value                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | acos(x)               | :math:`\cos^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | asin(x)               | :math:`\sin^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | atan(x)               | :math:`\tan^{-1}x`                                            |
+    +-----------------------+---------------------------------------------------------------+
+    | atan2(x,y)            | :math:`\tan^{-1}(^y/_x)`                                      |
+    +-----------------------+---------------------------------------------------------------+
+    | ceil(x)               | nearest integer larger than :math:`x`                         |
+    +-----------------------+---------------------------------------------------------------+
+    | cos(x)                | :math:`\cos x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | cosw(x,a,b,c)         | :math:`\cos^2` -shaped window centered at :math:`b` with a    |
+    |                       | rising slope of length :math:`a` and a trailing slope of      |
+    |                       | length :math:`c`.                                             |
+    +-----------------------+---------------------------------------------------------------+
+    | deg(x)                | The value of :math:`x` converted from radians to degrees      |
+    +-----------------------+---------------------------------------------------------------+
+    | erf(x)                | :math:`\frac{2}{\sqrt{\pi}} \int_0^x{\text{exp}(-t^2)dt}`     |
+    +-----------------------+---------------------------------------------------------------+
+    | erfc(x)               | :math:`1 - \text{erf}(x)`                                     |
+    +-----------------------+---------------------------------------------------------------+
+    | exp(x)                | :math:`e^x`                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | floor(x)              | Largest integer value not larger than :math:`x`               |
+    +-----------------------+---------------------------------------------------------------+
+    | hypot(x,y)            | :math:`\sqrt{x^2 + y^2}`                                      |
+    +-----------------------+---------------------------------------------------------------+
+    | ln(x)                 | :math:`\ln x`                                                 |
+    +-----------------------+---------------------------------------------------------------+
+    | log(x)                | :math:`\log_{10} x`                                           |
+    +-----------------------+---------------------------------------------------------------+
+    | maxp(x,y)             | Takes the maximum between :math:`x` and :math:`y`             |
+    +-----------------------+---------------------------------------------------------------+
+    | minp(x,y)             | Takes the minimum between :math:`x` and :math:`y`             |
+    +-----------------------+---------------------------------------------------------------+
+    | mod(x,y)              | Gives the remainder of  :math:`x` divided by :math:`y`        |
+    +-----------------------+---------------------------------------------------------------+
+    | pi                    | Ratio of the circumference of a circle and its diameter.      |
+    +-----------------------+---------------------------------------------------------------+
+    | rand                  | Gives a vector of uniformly distributed random numbers        |
+    |                       | from 0 to 1.                                                  |
+    +-----------------------+---------------------------------------------------------------+
+    | rnorm(x,y)            | Gives a vector of Gaussian random numbers distributed as      |
+    |                       | :math:`N(x,y)`. Note that if :math:`x` and :math:`y` are      |
+    |                       | vectors, each number generated will have a different mean and |
+    |                       | variance according to the arguments.                          |
+    +-----------------------+---------------------------------------------------------------+
+    | shift(x,s)            | Shifts the values in the input vector :math:`x` by the number |
+    |                       | of positions given by :math:`s`. Note that :math:`s` must be  |
+    |                       | a scalar.                                                     |
+    +-----------------------+---------------------------------------------------------------+
+    | sin(x)                | :math:`\sin x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+    | sqr(x)                | :math:`x^2`                                                   |
+    +-----------------------+---------------------------------------------------------------+
+    | sqrt(x)               | :math:`\sqrt{x}`                                              |
+    +-----------------------+---------------------------------------------------------------+
+    | tan(x)                | :math:`\tan x`                                                |
+    +-----------------------+---------------------------------------------------------------+
+
+
+.. tabularcolumns:: |p{0.4\linewidth}|p{0.4\linewidth}|
+.. _CHDDJEHH:
+.. table:: Examples of source waveform expressions.
+
+    +---------------------------------------------+-------------------------------------------------------------+
+    | Expression                                  | Meaning                                                     |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*sin(2*pi*10*x)                    | A 10-Hz sine wave with 20 nAm amplitude                     |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*sin(2*pi*2*x)*sin(2*pi*10*x)      | A 10-Hz 20-nAm sine wave, amplitude modulated               |
+    |                                             | sinusoidally at 2 Hz.                                       |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 20e-9*cosw(t,100,100,100)               | :math:`\cos^2`-shaped pulse, centered at :math:`t` = 100 ms |
+    |                                             | with 100 ms leading and trailing slopes, 20 nAm amplitude   |
+    +---------------------------------------------+-------------------------------------------------------------+
+    | q = 30e-9*(t > 0)*(t < 300)*sin(2*pi*20*x)  | 20-Hz sine wave, 30 nAm amplitude, cropped in time to       |
+    |                                             | 0...300 ms.                                                 |
+    +---------------------------------------------+-------------------------------------------------------------+
+
+
+.. _mne_smooth:
+
+mne_smooth
+==========
+
+Produce a smoothed version of a w or an stc file.
+
+``--src <name>``
+
+    The source space file.
+
+``--in <name>``
+
+    The w or stc file to smooth.
+
+``--smooth <val>``
+
+    Number of smoothing steps.
+
+
+.. _mne_surf2bem:
+
+mne_surf2bem
+============
+
+``--surf <*name*>``
+
+    Specifies a FreeSurfer binary format surface file. Before specifying the
+    next surface (``--surf`` or ``--tri`` options)
+    details of the surface specification can be given with the options
+    listed in :ref:`BEHCDICC`.
+
+``--tri <*name*>``
+
+    Specifies a text format surface file. Before specifying the next
+    surface (``--surf`` or ``--tri`` options) details
+    of the surface specification can be given with the options listed
+    in :ref:`BEHCDICC`. The format of these files is described
+    in :ref:`BEHDEFCD`.
+
+``--check``
+
+    Check that the surfaces are complete and that they do not intersect. This
+    is a recommended option. For more information, see :ref:`BEHCBDDE`.
+
+``--checkmore``
+
+    In addition to the checks implied by the ``--check`` option,
+    check skull and skull thicknesses. For more information, see :ref:`BEHCBDDE`.
+
+``--fif <*name*>``
+
+    The output fif file containing the BEM. These files normally reside in
+    the bem subdirectory under the subject's mri data. A name
+    ending with ``-bem.fif`` is recommended.
+
+.. _BEHCDICC:
+
+Surface options
+---------------
+
+These options can be specified after each ``--surf`` or ``--tri`` option
+to define details for the corresponding surface.
+
+``--swap``
+
+    Swap the ordering of the triangle vertices. The standard convention in
+    the MNE software is to have the vertices ordered so that the vector
+    cross product of the vectors from vertex 1 to 2 and 1 to 3 gives the
+    direction of the outward surface normal. Text format triangle files
+    produced by some software packages have the opposite order. For
+    these files, the ``--swap`` option is required. This option does
+    not have any effect on the interpretation of the FreeSurfer surface
+    files specified with the ``--surf`` option.
+
+``--sigma <*value*>``
+
+    The conductivity of the compartment inside this surface in S/m.
+
+``--shift <*value/mm*>``
+
+    Shift the vertices of this surface by this amount, given in mm,
+    in the outward direction, *i.e.*, in the positive
+    vertex normal direction.
+
+``--meters``
+
+    The vertex coordinates of this surface are given in meters instead
+    of millimeters. This option applies to text format files only. This
+    definition does not affect the units of the shift option.
+
+``--id <*number*>``
+
+    Identification number to assign to this surface. (1 = inner skull, 3
+    = outer skull, 4 = scalp).
+
+``--ico <*number*>``
+
+    Downsample the surface to the designated subdivision of an icosahedron.
+    This option is relevant (and required) only if the triangulation
+    is isomorphic with a recursively subdivided icosahedron. For example,
+    the surfaces produced with mri_watershed are
+    isomorphic with the 5th subdivision of an icosahedron, thus containing 20480
+    triangles. However, this number of triangles is too large for present
+    computers. Therefore, the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
+    in 1280 triangles. The recommended choice is ``--ico 4``.
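+
+    The triangle counts follow from the fact that each subdivision
+    quadruples the 20 faces of the icosahedron; for example::
+
+        for n in (3, 4, 5):
+            print(n, 20 * 4 ** n)   # 3 -> 1280, 4 -> 5120, 5 -> 20480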
+
+
+.. _mne_toggle_skips:
+
+mne_toggle_skips
+================
+
+Toggle skip tags on and off.
+
+``--raw <name>``
+
+    The raw data file to process.
+
+
+.. _mne_transform_points:
+
+mne_transform_points
+====================
+
+Purpose
+-------
+
+mne_transform_points applies
+the coordinate transformation relating the MEG head coordinates
+and the MRI coordinates to a set of locations listed in a text file.
+
+Command line options
+--------------------
+
+mne_transform_points accepts
+the following command-line options:
+
+``--in  <*name*>``
+
+    Specifies the input file. The file must contain three numbers on
+    each line which are the *x*, *y*,
+    and *z* coordinates of a point in space. By default,
+    the input is in millimeters.
+
+``--iso  <*name*>``
+
+    Specifies the name of a fif file containing Isotrak data. If this
+    option is present, this file will be used as the input instead of the
+    text file specified with the ``--in`` option.
+
+``--trans  <*name*>``
+
+    Specifies the name of a fif file containing the coordinate transformation
+    between the MEG head coordinates and MRI coordinates. If this file
+    is not present, the transformation will be replaced by a unit transform.
+
+``--out  <*name*>``
+
+    Specifies the output file. This file has the same format as the
+    input file.
+
+``--hpts``
+
+    Output the data in the head points (hpts)
+    format accepted by tkmedit. In
+    this format, the coordinates are preceded by a point category (hpi,
+    cardinal or fiducial, eeg, extra) and a sequence number, see :ref:`CJADJEBH`.
+
+``--meters``
+
+    The coordinates are listed in meters rather than millimeters.
+
+``--tomri``
+
+    By default, the coordinates are transformed from MRI coordinates to
+    MEG head coordinates. This option reverses the transformation to
+    be from MEG head coordinates to MRI coordinates.
+
+
+.. _mne_tufts2fiff:
+
+mne_tufts2fiff
+==============
+
+``--raw <*filename*>``
+
+    Specifies the name of the raw data file to process.
+
+``--cal <*filename*>``
+
+    The name of the calibration data file. If calibration data are missing, the
+    calibration coefficients will be set to unity.
+
+``--elp <*filename*>``
+
+    The name of the electrode location file. If this file is missing,
+    the electrode locations will be unspecified. This file is in the "probe" file
+    format used by the *Source Signal Imaging, Inc.* software.
+    For description of the format, see http://www.sourcesignal.com/formats_probe.html.
+    The fiducial marker locations, optional in the "probe" file
+    format specification, are mandatory for mne_tufts2fiff. Note
+    that some other software packages may produce electrode-position
+    files with the ``.elp`` ending that do not conform to the above
+    specification.
+
+.. note::
+
+    The conversion process includes a transformation from the Tufts head coordinate system convention to that used in the Neuromag systems.
+
+.. note::
+
+    The fiducial landmark locations, optional in the probe file format, must be present for mne_tufts2fiff.
+
+
+.. _mne_view_manual:
+
+mne_view_manual
+===============
+
+This script shows you the manual in a PDF reader.
+
+
+.. _mne_volume_data2mri:
+
+mne_volume_data2mri
+===================
+
+With the help of the :ref:`mne_volume_source_space` utility
+it is possible to create a source space which is
+defined within a volume rather than on a surface. If the ``--mri`` option
+was used in :ref:`mne_volume_source_space`, the
+source space file contains an interpolator matrix which performs
+a trilinear interpolation into the voxel space of the MRI volume
+specified.
+
+The purpose of :ref:`mne_volume_data2mri` is
+to produce MRI overlay data compatible with FreeSurfer MRI viewers
+(in the mgh or mgz formats) from such w or stc files.
+
+The command-line options are:
+
+``--src <*filename*>``
+
+    The name of the volumetric source space file created with mne_volume_source_space.
+    The source space must have been created with the ``--mri`` option,
+    which adds the appropriate sparse trilinear interpolator matrix
+    to the source space.
+
+``--w <*filename*>``
+
+    The name of a w file to convert
+    into an MRI overlay.
+
+``--stc <*filename*>``
+
+    The name of the stc file to convert
+    into an MRI overlay. If this file has many time frames, the output
+    file may be huge. Note: if both ``--w`` and ``--stc`` are
+    specified, ``--w`` takes precedence.
+
+``--scale <*number*>``
+
+    Multiply the data in the stc or w file by
+    this scaling constant before producing the overlay.
+
+``--out <*filename*>``
+
+    Specifies the name of the output MRI overlay file. The name must end
+    with either ``.mgh`` or ``.mgz`` identifying the
+    uncompressed and compressed FreeSurfer MRI formats, respectively.
+
+
+.. _mne_volume_source_space:
+
+mne_volume_source_space
+=======================
+
+``--surf <*name*>``
+
+    Specifies a FreeSurfer surface file containing the surface which
+    will be used as the boundary for the source space.
+
+``--bem <*name*>``
+
+    Specifies a BEM file (ending in ``-bem.fif``). The inner
+    skull surface will be used as the boundary for the source space.
+
+``--origin <*x/mm*> : <*y/mm*> : <*z/mm*>``
+
+    If neither of the two surface options described above is present,
+    the source space will be spherical with the origin at this location,
+    given in MRI (RAS) coordinates.
+
+``--rad <*radius/mm*>``
+
+    Specifies the radius of a spherical source space. Default value
+    = 90 mm.
+
+``--grid <*spacing/mm*>``
+
+    Specifies the grid spacing in the source space.
+
+``--mindist <*distance/mm*>``
+
+    Only points which are further than this distance from the bounding surface
+    are included. Default value = 5 mm.
+
+``--exclude <*distance/mm*>``
+
+    Exclude points that are closer than this distance to the center
+    of mass of the bounding surface. By default, there will be no exclusion.
+
+``--mri <*name*>``
+
+    Specifies an MRI volume (in mgz or mgh format).
+    If this argument is present the output source space file will contain
+    a (sparse) interpolation matrix which allows mne_volume_data2mri to
+    create an MRI overlay file, see :ref:`mne_volume_data2mri`.
+
+``--pos <*name*>``
+
+    Specifies a name of a text file containing the source locations
+    and, optionally, orientations. Each line of the file should contain
+    3 or 6 values. If the number of values is 3, they indicate the source
+    location, in millimeters. The orientation of the sources will be
+    set to the z-direction. If the number of values is 6, the source
+    orientation will be parallel to the vector defined by the remaining
+    3 numbers on each line. With ``--pos``, all of the options
+    defined above will be ignored. By default, the source position and
+    orientation data are assumed to be given in MRI coordinates.
+
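+    For illustration, a hypothetical ``--pos`` file defining two sources,
+    each with an explicit orientation (all values made up), could read::
+
+        10.0   0.0  50.0  0.0  0.0  1.0
+       -10.0   0.0  50.0  0.0  0.0  1.0
+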
+``--head``
+
+    If this option is present, the source locations and orientations
+    in the file specified with the ``--pos`` option are assumed
+    to be given in the MEG head coordinates.
+
+``--meters``
+
+    Indicates that the source locations in the file defined with the ``--pos`` option
+    are given in meters instead of millimeters.
+
+``--src <*name*>``
+
+    Specifies the output file name. Use a name ending with ``-src.fif``.
+
+``--all``
+
+    Include all vertices in the output file, not just those in use.
+    This option is implied when the ``--mri`` option is present.
+    Even with the ``--all`` option, only those vertices actually
+    selected will be marked as "in use" in the
+    output source space file.
+
+
+.. _mne_watershed_bem:
+
+mne_watershed_bem
+=================
+
+``--subject  <*subject*>``
+
+    Defines the name of the subject. This can also be accomplished
+    by setting the SUBJECT environment variable.
+
+``--overwrite``
+
+    Overwrite the results of a previous run of mne_watershed_bem.
+
+``--atlas``
+
+    Makes mri_watershed employ
+    atlas information to correct the segmentation.
diff --git a/doc/manual/cookbook.rst b/doc/manual/cookbook.rst
new file mode 100644
index 0000000..6ecdb8b
--- /dev/null
+++ b/doc/manual/cookbook.rst
@@ -0,0 +1,420 @@
+.. _cookbook:
+
+========
+Cookbook
+========
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+Overview
+========
+
+This section describes a typical MEG/EEG workflow, from preprocessing
+up to source reconstruction. The workflow is summarized in
+:ref:`flow_diagram`. References below refer to Python functions and objects.
+
+.. _flow_diagram:
+
+.. figure:: ../_static/flow_diagram.svg
+    :alt: MNE Workflow Flowchart
+    :align: center
+
+    **Workflow of the MNE software**
+
+
+Preprocessing
+=============
+The following MEG and EEG data preprocessing steps are recommended:
+
+- Bad channels in the MEG and EEG data must be identified, see :ref:`BABBHCFG`.
+
+- The data has to be filtered to the desired passband.
+
+- Artifacts should be suppressed (e.g., using ICA or SSP).
+
+.. note:: For older systems, coding problems on the trigger channel
+  STI 014 and errors in the EEG/MEG channel information may need to be
+  fixed, see :ref:`BABCDBDI` and :ref:`BABCDFJH`.
+
+
+.. _BABBHCFG:
+
+Marking bad channels
+--------------------
+
+Some MEG or EEG channels may not function properly
+for various reasons. These channels should be excluded from
+analysis by marking them bad as::
+
+    >>> raw.info['bads'] = ['MEG2443']
+
+It is especially important to exclude channels that show no signal at
+all (flat channels): their noise estimates will be unrealistically low,
+so the current estimate calculations will give a strong weight to the
+zero signal on the flat channels and the resulting estimates will
+essentially vanish. It is also important to exclude noisy channels
+because they can contaminate other channels when signal-space
+projection or the EEG average electrode
+reference is employed. Noisy bad channels can also adversely affect
+averaging and noise-covariance matrix estimation by causing
+unnecessary rejections of epochs.
+
+Recommended ways to identify bad channels are:
+
+- Observe the quality of the data during acquisition and note
+  malfunctioning channels on your measurement protocol sheet.
+
+- View the on-line averages and check the condition of the channels.
+
+- Compute preliminary off-line averages with artifact rejection,
+  SSP/ICA, and EEG average electrode reference computation
+  turned off, and check the condition of the channels.
+
+- View raw data with :func:`mne.io.Raw.plot` without SSP/ICA
+  enabled and identify bad channels.
+
+.. note:: It is strongly recommended that bad channels are identified and
+          marked in the original raw data files. If present in the raw data
+          files, the bad channel selections will be automatically transferred
+          to averaged files, noise-covariance matrices, forward solution
+          files, and inverse operator decompositions.
+
+Artifact suppression
+--------------------
+
+SSP
+###
+
+Signal-space projection (SSP) is one approach to rejecting
+external disturbances in software. Unlike many other
+noise-cancellation approaches, SSP does
+not require additional reference sensors to record the disturbance
+fields. Instead, SSP relies on the fact that the magnetic field
+distributions generated by the sources in the brain have spatial
+distributions sufficiently different from those generated by external
+noise sources. Furthermore, it is implicitly assumed that the linear
+space spanned by the significant external noise patterns has a low
+dimension.
+
+SSP-based rejection is often done using the
+:func:`mne.preprocessing.compute_proj_ecg` and
+:func:`mne.preprocessing.compute_proj_eog` functions; see the :ref:`ssp`
+section for more information.
+
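+For example, ECG projectors could be computed and added to the data along
+these lines (a sketch; the numbers of components are illustrative)::
+
+    >>> projs, ecg_events = mne.preprocessing.compute_proj_ecg(raw, n_grad=1,
+    ...                                                        n_mag=1, n_eeg=0)
+    >>> raw.add_proj(projs)
+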
+ICA
+###
+
+Many M/EEG signals, including biological artifacts, reflect non-Gaussian
+processes. Therefore PCA-based artifact rejection is likely to perform worse
+than ICA at separating the signal from noise sources.
+
+ICA-based artifact rejection is done using the :class:`mne.preprocessing.ICA`
+class, see the :ref:`ica` section for more information.
+
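+A minimal sketch (the component selection is data dependent; the excluded
+index here is made up)::
+
+    >>> ica = mne.preprocessing.ICA(n_components=0.95)
+    >>> ica.fit(raw)
+    >>> ica.exclude = [0]  # a component judged to reflect, e.g., eye blinks
+    >>> raw_clean = ica.apply(raw.copy())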
+
+Epoching and evoked data
+========================
+
+Epoching of raw data is done using events, which define a ``t=0`` for your
+data chunks. Event times stamped by the acquisition system can be extracted
+using :func:`mne.find_events`::
+
+    >>> events = mne.find_events(raw)
+
+The ``events`` array can then be modified, extended, or changed if necessary.
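+
+For example, NumPy indexing can restrict the array to a single trigger
+code (a sketch; trigger code ``1`` matches the epoching example below)::
+
+    >>> events = events[events[:, 2] == 1]  # keep only trigger code 1
+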
+If the original trigger codes and trigger times are correct for the analysis
+of interest, :class:`mne.Epochs` for the first event type (``1``) can be
+constructed using::
+
+    >>> picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True)
+    >>> reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+    >>> epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
+    ...                     proj=True, picks=picks, baseline=(None, 0),
+    ...                     preload=True, reject=reject)
+
+Once the :class:`mne.Epochs` are constructed, they can be averaged to obtain
+:class:`mne.Evoked` data as::
+
+    >>> evoked = epochs.average()
+
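+The averaged response can then be inspected visually (a quick check)::
+
+    >>> evoked.plot()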
+
+Source localization
+===================
+
+MNE makes extensive use of the FreeSurfer file structure for analysis.
+Before starting data analysis, we recommend setting up the environment
+variable ``SUBJECTS_DIR`` (or set it permanently using :func:`mne.set_config`)
+to select the directory under which the anatomical MRI data are stored.
+This makes it so that the ``subjects_dir`` argument does not need to
+be passed to many functions.
+
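+For example (the path shown is illustrative)::
+
+    >>> mne.set_config('SUBJECTS_DIR', '/home/user/subjects')
+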
+Anatomical information
+----------------------
+
+.. _CHDBBCEJ:
+
+Cortical surface reconstruction with FreeSurfer
+###############################################
+
+The first processing stage is the creation of various surface
+reconstructions with FreeSurfer. The recommended FreeSurfer workflow
+is summarized on the `FreeSurfer wiki pages <https://surfer.nmr.mgh.harvard.edu/fswiki/RecommendedReconstruction>`_.
+
+.. _CIHCHDAE:
+
+Setting up the source space
+###########################
+
+This stage consists of the following:
+
+- Creating a suitable decimated dipole grid on the white matter surface.
+
+- Creating the source space file in fif format.
+
+This is accomplished using :func:`mne.setup_source_space` and
+:func:`mne.write_source_spaces`. These assume that the anatomical MRI processing
+has been completed as described in :ref:`CHDBBCEJ`.
+
+.. _BABGCDHA:
+
+.. table:: Recommended subdivisions of an icosahedron and an octahedron for
+           the creation of source spaces. The approximate source spacing and
+           corresponding surface area have been calculated assuming a
+           1000-cm2 surface area per hemisphere.
+
+    ===========  ======================  ===================  =============================
+    ``spacing``  Sources per hemisphere  Source spacing / mm  Surface area per source / mm2
+    ===========  ======================  ===================  =============================
+    ``'oct5'``   1026                    9.9                  97
+    ``'ico4'``   2562                    6.2                  39
+    ``'oct6'``   4098                    4.9                  24
+    ``'ico5'``   10242                   3.1                  9.8
+    ===========  ======================  ===================  =============================
+
+For example, to create the reconstruction geometry for ``subject='sample'``
+with a ~5-mm spacing between the grid points, say::
+
+    >>> src = mne.setup_source_space('sample', spacing='oct6')
+    >>> mne.write_source_spaces('sample-oct6-src.fif', src)
+
+This creates the source spaces and writes them to disk.
+
+.. _CHDBJCIA:
+
+Creating the BEM model meshes
+#############################
+
+Calculation of the forward solution using the boundary-element
+model (BEM) requires that the surfaces separating regions of different
+electrical conductivities are tessellated with suitable surface
+elements. Our BEM software employs triangular tessellations. Therefore,
+prerequisites for BEM calculations are the segmentation of the MRI
+data and the triangulation of the relevant surfaces.
+
+For MEG computations, a reasonably accurate solution can
+be obtained by using a single-compartment BEM assuming the shape
+of the intracranial volume. For EEG, the standard model contains
+the intracranial space, the skull, and the scalp.
+
+At present, no bulletproof method exists for creating the
+triangulations. Feasible approaches are described in :ref:`create_bem_model`.
+
+.. _BABDBBFC:
+
+Setting up the head surface triangulation files
+###############################################
+
+The segmentation algorithms described in :ref:`create_bem_model` produce
+either FreeSurfer surfaces or triangulation
+data in text format. Before proceeding to the creation of the boundary
+element model, standard files for FreeSurfer surfaces must be present:
+
+1. **inner_skull.surf** contains the inner skull triangulation.
+
+2. **outer_skull.surf** contains the outer skull triangulation.
+
+3. **outer_skin.surf** contains the head surface triangulation.
+
+.. _CIHDBFEG:
+
+Setting up the boundary-element model
+#####################################
+
+This stage sets up the subject-dependent data for computing
+the forward solutions::
+
+    >>> model = mne.make_bem_model('sample')
+    >>> mne.write_bem_surfaces('sample-5120-5120-5120-bem.fif', model)
+
+The returned ``model`` is a list of BEM surfaces.
+This step also checks that the input surfaces
+are complete and that they are topologically correct, *i.e.*,
+that the surfaces do not intersect and that the surfaces are correctly
+ordered (outer skull surface inside the scalp and inner skull surface
+inside the outer skull).
+
+This step assigns the conductivity values to the BEM compartments.
+For the scalp and the brain compartments, the default is 0.3 S/m.
+The default skull conductivity is 50 times smaller, *i.e.*,
+0.006 S/m. Recent publications, see :ref:`CEGEGDEI`, report
+skull conductivity ratios ranging from 1:15 (Oostendorp *et
+al.*, 2000) to 1:25 - 1:50 (Lew *et al.*,
+2009, Gonçalves *et al.*, 2003). The
+MNE default ratio 1:50 is based on the typical values reported in
+(Gonçalves *et al.*, 2003), since their
+approach is based on a comparison of SEF/SEP measurements in a BEM model.
+The variability across publications may depend on individual variations
+but, more importantly, on the precision of the skull compartment
+segmentation.
+
+.. note:: To produce single-layer BEM models (``--homog`` flag in the C
+          command-line tools), pass a list with a single conductivity value,
+          *e.g.*, ``conductivity=[0.3]``.
+
+Using this model, the BEM solution can be computed using
+:func:`mne.make_bem_solution` as::
+
+    >>> bem_sol = mne.make_bem_solution(model)
+    >>> mne.write_bem_solution('sample-5120-5120-5120-bem-sol.fif', bem_sol)
+
+After the BEM is set up it is advisable to check that the
+BEM model meshes are correctly positioned using, *e.g.*, :class:`mne.Report`.
+
+.. note:: Up to this point all processing stages depend on the
+          anatomical (geometrical) information only and thus remain
+          identical across different MEG studies.
+
+.. note:: If you use custom head models you might need to set the ``ico``
+          parameter to ``None`` to skip subsampling of the surface.
+
+
+.. _CHDBEHDC:
+
+Aligning coordinate frames
+--------------------------
+
+The calculation of the forward solution requires knowledge
+of the relative location and orientation of the MEG/EEG and MRI
+coordinate systems (see :ref:`BJEBIBAI`). The head coordinate
+frame is defined by identifying the fiducial landmark locations,
+making the origin and orientation of the head coordinate system
+slightly user dependent. As a result, it is safest to reestablish
+the coordinate transformation for each experimental session,
+*i.e.*, whenever new head digitization data are employed.
+
+The interactive source analysis software :ref:`mne_analyze` provides
+tools for coordinate frame alignment, see :ref:`ch_interactive_analysis`.
+:ref:`CHDIJBIG` also contains tips for using :ref:`mne_analyze` for
+this purpose.
+
+.. warning:: This step is important. If the alignment of the
+             coordinate frames is inaccurate all subsequent processing
+             steps suffer from the error. Therefore, this step should be
+             performed by the person in charge of the study or by a trained
+             technician. Written or photographic documentation of the alignment
+             points employed during the MEG/EEG acquisition can also be
+             helpful.
+
+.. _BABCHEJD:
+
+Computing the forward solution
+------------------------------
+
+After the MRI-MEG/EEG alignment has been set, the forward
+solution, *i.e.*, the magnetic fields and electric
+potentials at the measurement sensors and electrodes due to dipole
+sources located on the cortex, can be calculated with the help of
+:func:`mne.make_forward_solution` as::
+
+    >>> fwd = mne.make_forward_solution(raw.info, fname_trans, src, bem_sol)
+
+Here ``fname_trans`` is the coordinate transformation (``-trans.fif``) file
+produced in the alignment step described above.
+
+.. _BABDEEEB:
+
+Computing the noise-covariance matrix
+-------------------------------------
+
+The MNE software employs an estimate of the noise-covariance
+matrix to weight the channels correctly in the calculations. The
+noise-covariance matrix provides information about field and potential
+patterns representing uninteresting noise sources of either human
+or environmental origin.
+
+The noise covariance matrix can be calculated in several
+ways:
+
+- Employ the individual epochs during
+  off-line averaging to calculate the full noise covariance matrix.
+  This is the recommended approach for evoked responses, *e.g.* using
+  :func:`mne.compute_covariance`::
+
+      >>> cov = mne.compute_covariance(epochs, method='auto')
+
+- Employ empty room data (collected without the subject) to
+  calculate the full noise covariance matrix. This is recommended
+  for analyzing ongoing spontaneous activity. This can be done using
+  :func:`mne.compute_raw_covariance` as::
+
+      >>> cov = mne.compute_raw_covariance(raw_erm)
+
+- Employ a section of continuous raw data collected in the presence
+  of the subject to calculate the full noise covariance matrix. This
+  is the recommended approach for analyzing epileptic activity. The
+  data used for this purpose should be free of technical artifacts
+  and epileptic activity of interest. The length of the data segment
+  employed should be at least 20 seconds. One can also use a long
+  (> 200 s) segment of data with epileptic spikes present provided
+  that the spikes occur infrequently and that the segment is apparently
+  stationary with respect to background brain activity. This can also
+  be done with :func:`mne.compute_raw_covariance`, as sketched below.
+
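+For instance, a 30-second segment could be used as follows (a sketch; the
+interval is illustrative)::
+
+    >>> cov = mne.compute_raw_covariance(raw, tmin=0, tmax=30)
+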
+See :ref:`covariance` for more information.
+
+.. _CIHCFJEI:
+
+Calculating the inverse operator
+--------------------------------
+
+The MNE software does not calculate the inverse operator
+explicitly but rather computes an SVD of a matrix composed of the
+noise-covariance matrix, the result of the forward calculation,
+and the source covariance matrix. This approach has the benefit
+that the regularization parameter ('SNR') can
+be adjusted easily when the final source estimates or dSPMs are
+computed. For mathematical details of this approach,
+please consult :ref:`CBBDJFBJ`.
+
+This computation stage can be carried out using
+:func:`mne.minimum_norm.make_inverse_operator` as::
+
+    >>> inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov, loose=0.2)
+
+Creating source estimates
+-------------------------
+
+Once all the preprocessing steps described above have been
+completed, the inverse operator computed can be applied to the MEG
+and EEG data as::
+
+    >>> stc = mne.minimum_norm.apply_inverse(evoked, inv, lambda2=1. / 9.)
+
+And the results can be viewed as::
+
+    >>> stc.plot()
+
+The interactive analysis tool :ref:`mne_analyze` can also
+be used to explore the data and to produce quantitative analysis
+results, screen snapshots, and QuickTime (TM) movie files,
+see :ref:`ch_interactive_analysis`.
+
+Group analyses
+--------------
+
+Group analysis is facilitated by morphing source estimates, which can be
+done, *e.g.*, to ``subject='fsaverage'`` as::
+
+    >>> stc_fsaverage = stc.morph('fsaverage')
+
+See :ref:`ch_morph` for more information.
diff --git a/doc/source/manual/sampledata.rst b/doc/manual/datasets.rst
similarity index 87%
rename from doc/source/manual/sampledata.rst
rename to doc/manual/datasets.rst
index 60c3707..73f1712 100644
--- a/doc/source/manual/sampledata.rst
+++ b/doc/manual/datasets.rst
@@ -6,12 +6,16 @@
 The sample data set
 ===================
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
 Purpose
 #######
 
 This Chapter gives a detailed description of the processing
 of a sample data set, which can be employed to familiarize with
-the workflow described in :ref:`ch_cookbook`.
+the workflow described in :ref:`cookbook`.
 
 .. note:: Going through the analysis exercise in    this chapter is not a substitute for reading other chapters of this    manual and understanding the concepts underlying MNE software.
 
@@ -28,7 +32,10 @@ Imaging. EEG data from a 60-channel electrode cap was acquired simultaneously wi
 the MEG. The original MRI data set was acquired with a Siemens 1.5 T
 Sonata scanner using an MPRAGE sequence.
 
-.. note:: These data are provided solely for the    purpose of getting familiar with the MNE software. They should not    be redistributed to third parties. The data should not be used to    evaluate the performance of the MEG or MRI system employed.
+.. note:: These data are provided solely for the purpose of getting familiar
+          with the MNE software. They should not be redistributed to third
+          parties. The data should not be used to evaluate the performance
+          of the MEG or MRI system employed.
 
 In the MEG/EEG experiment, checkerboard patterns were presented
 into the left and right visual field, interspersed by tones to the
@@ -56,46 +63,9 @@ of the corresponding trigger codes is provided in :ref:`BABDHIFJ`
 Setting up
 ##########
 
-The sample data set is distributed with the MNE software
-as a compressed tar archive located at ``$MNE_ROOT/sample-data/MNE-sample-data.tar.gz`` .
-To make a personal copy of the sample data set, follow these steps:
-
-- Set up for using the MNE software as
-  instructed in :ref:`user_environment` of this manual.
-
-- Create a directory for your personal copy: ``mkdir`` <*yourdir*> , where <*yourdir*> is
-  the location where you want your personal copy to reside. Tho store
-  the sample data set and to finish the tutorials in this Chapter, you
-  need approximately 600 MBytes of space on the disk where <*yourdir*> is
-  located.
-
-- Go to your newly created sample data directory: ``cd`` <*yourdir*> .
-
-- Extract the sample data: ``tar zxvf`` <*dir*> ``/MNE-sample-data.tar.gz`` ,
-  where <*dir*> is the location
-  of the tar archive, provided by your system administrator.
-
-To start the tutorials you need to:
-
-- Set up MNE software user environment, see :ref:`user_environment`.
-
-- Set the SUBJECTS_DIR environment variable:``setenv SUBJECTS_DIR`` <*yourdir*> ``/subjects`` (csh
-  and tcsh) or ``export SUBJECTS_DIR=`` <*yourdir*> ``/subjects`` (POSIX-compatible
-  shell). Most users at the Martinos Center have tcsh as their login shell.
-
-- Assign the SUBJECT environment variable the value ``sample`` .
-
-- For convenience, you can also set the environment variable
-  SAMPLE to <*yourdir*> . The following
-  tutorial examples assume you have done this.
-
-- Set up the FreeSurfer environment
-  using the commands specific to your site. The FreeSurfer license
-  is needed for the source space creation covered in :ref:`CHDIGEJG`.
-
-.. note:: From this point on, directories and files under    your personal copy of the sample data set under <*yourdir*> will    be referred to by relative pathnames. For example, the file <*yourdir*> ``/MEG/sample/audvis.ave`` will    be simply called ``MEG/sample/audvis.ave`` .
+The sample dataset can be downloaded automatically by doing::
 
-.. note:: You can also proceed without FreeSurfer installed    if you choose to use source space creation using the recursively    subdivided octahedron or icosahedron method. For more information,    see the Note in :ref:`CHDIGEJG`.
+    >>> mne.datasets.sample.data_path(verbose=True)
 
 Contents of the data set
 ########################
@@ -148,9 +118,6 @@ in the sample data set:
 - The MEG/EEG raw data file has been checked with the utilities described
   in :ref:`BABCDBDI` and :ref:`BABCDFJH`.
 
-- Template scripts for averaging and computation of the noise-covariance
-  matrices have been written.
-
 Setting up subject-specific data
 ################################
 
@@ -217,7 +184,7 @@ commands:
 
 ``mne_make_eeg_layout --fif sample_audvis_raw.fif --lout $HOME/.mne/lout/sample-EEG.lout``
 
-Please refer to :ref:`CHDDGDJA` for more information
+Please refer to :ref:`mne_make_eeg_layout` for more information
 on mne_make_eeg_layout .
 
 .. note:: It is usually sufficient to create one EEG layout    for each electrode cap you are using in your experiment rather than    using a different layout file for each data file generated using    the same cap.
@@ -258,7 +225,7 @@ below:
   session by clicking on their channel names on the left. You can
   save the bad channel selection to the file from File/Apply bad channels . Bad channel marking can be removed
   by clicking on their channel names again and selecting File/Apply bad channels . Alternatively, you can use the utility mne_mark_bad_channels to
-  set a bad channel selection, see :ref:`CHDDHBEE`.
+  set a bad channel selection, see :ref:`mne_mark_bad_channels`.
 
 - Switch the projections back on and change filter to a 40-Hz
   lowpass.
@@ -336,35 +303,7 @@ with the command:
 
 ``mne_process_raw --raw sample_audvis_raw.fif `` ``--lowpass 40 --projoff `` ``--saveavetag -ave --ave audvis.ave``
 
-The functions of the options are:
-
-**\---raw**
-
-    Specifies the raw file.
-
-**\---lowpass**
-
-    Specifies the lowpass filter corner frequency.
-
-**\---projoff**
-
-    Do not apply signal-space projection and average electrode reference
-    to the data. Regardless, the projection information is included with
-    the data file so that it can be applied later. It is also possible
-    to specify the ``--projon`` option but then there is no
-    possibility to view the original data in subsequent phases of the
-    analysis.
-
-**\---saveavetag**
-
-    Specifies how the averages are named. With this option, the ``_raw.fif`` ending
-    is stripped of the original raw data file and the tag specified
-    with this option (``--ave`` ) is added. The average file
-    and the corresponding log file will have the extensions ``.fif`` and ``.log`` , respectively.
-
-**\---ave**
-
-    Specifies the averaging script.
+See :ref:`mne_process_raw` for command-line options.
 
 As a result of running the averaging script a file called ``sample_audvis-ave.fif`` is
 created. It contains averages to the left and right ear auditory
diff --git a/doc/source/manual/analyze.rst b/doc/manual/gui/analyze.rst
similarity index 96%
rename from doc/source/manual/analyze.rst
rename to doc/manual/gui/analyze.rst
index cb97ce1..f369258 100644
--- a/doc/source/manual/analyze.rst
+++ b/doc/manual/gui/analyze.rst
@@ -1,15 +1,20 @@
 
 .. _ch_interactive_analysis:
 
-====================
-Interactive analysis
-====================
+=====================================
+Interactive analysis with mne_analyze
+=====================================
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
 
 Overview
 ########
 
 Interactive analysis of the MEG/EEG data and source estimates
-is facilitated by the mne_analyze tool.
+is facilitated by the :ref:`mne_analyze` tool.
 Its features include:
 
 - Viewing of evoked-response data or data
@@ -49,88 +54,10 @@ Its features include:
 - Viewing of continuous head-position data delivered by Elekta-Neuromag
   software.
 
-.. _CHDJECCG:
-
-Command line options
-####################
-
-Since mne_analyze is
-primarily an interactive analysis tool, there are only a few command-line
-options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---cd <*dir*>**
-
-    Change to this directory before starting.
-
-**\---subject <*name*>**
-
-    Specify the default subject name for surface loading.
-
-**\---digtrig <*name*>**
-
-    Name of the digital trigger channel. The default value is 'STI
-    014'. Underscores in the channel name will be replaced
-    by spaces.
-
-**\---digtrigmask <*number*>**
-
-    Mask to be applied to the raw data trigger channel values before considering
-    them. This option is useful if one wants to set some bits in a don't
-    care state. For example, some finger response pads keep the trigger
-    lines high if not in use, *i.e.*, a finger is
-    not in place. Yet, it is convenient to keep these devices permanently
-    connected to the acquisition system. The number can be given in
-    decimal or hexadecimal format (beginning with 0x or 0X). For example,
-    the value 255 (0xFF) means that only the lowest order byte (usually
-    trigger lines 1 - 8 or bits 0 - 7) will be considered.
-
-**\---visualizehpi**
-
-    Start mne_analyze in the restricted *head
-    position visualization* mode. For details, see :ref:`CHDEDFAE`.
-
-**\---dig <*filename*>**
-
-    Specify a file containing the head shape digitization data. This option
-    is only usable if the *head position visualization* position
-    visualization mode has been first invoked with the --visualizehpi
-    option.
-
-**\---hpi <*filename*>**
-
-    Specify a file containing the transformation between the MEG device
-    and head coordinate frames. This option is only usable if the *head
-    position visualization* position visualization mode has
-    been first invoked with the ``--visualizehpi`` option.
-
-**\---scalehead**
-
-    In *head position visualization* mode, scale
-    the average scalp surface according to the head surface digitization
-    data before aligning  them to the scalp surface. This option is
-    recommended.
-
-**\---rthelmet**
-
-    Use the room-temperature helmet surface instead of the MEG sensor
-    surface when showing the relative position of the MEG sensors and
-    the head in the *head position visualization* mode.
-
-.. note:: Before starting mne_analyze the ``SUBJECTS_DIR`` environment variable    has to be set.
-
-.. note:: Strictly speaking, trigger mask value zero would    mean that all trigger inputs are ignored. However, for convenience,    setting the mask to zero or not setting it at all has the same effect    as 0xFFFFFFFF, *i.e.*, all bits set.
+See :ref:`mne_analyze` for command line options.
 
-.. note:: The digital trigger channel can also be set with    the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable    value will *not* be replaced with spaces by mne_analyze .    Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME    environment variable.
+.. note:: Before starting mne_analyze, the ``SUBJECTS_DIR`` environment variable has to be set.
 
-.. note:: The digital trigger channel mask can also be    set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option    supersedes the MNE_TRIGGER_CH_MASK environment variable.
 
 The main window
 ###############
@@ -2458,7 +2385,7 @@ The newest versions of Neuromag software allow continuous
 acquisition of signals from the HPI coils. On the basis of these
 data the relative position of the dewar and the head can be computed
 a few times per second. The resulting location data, expressed in
-the form of unit quaternions (see http://mathworld.wolfram.com/Quaternion.html)
+the form of unit quaternions (see http://en.wikipedia.org/wiki/Quaternion)
 and a translation.
 
 The continuous HPI data can be through the File/View continuous HPI data... menu item, which pops up
@@ -2693,7 +2620,7 @@ an Elekta-Neuromag MEG system.
 
     Snapshot of mne_analyze in the head position visualization mode.
 
-As described in :ref:`CHDJECCG`, the head position
+As described in :ref:`mne_analyze`, the head position
 visualization mode can be customized with the --dig, --hpi, --scalehead,
 and --rthelmet options. For this mode to be useful, the --dig and
 --hpi options are mandatory. If existing saved data are viewed,
diff --git a/doc/source/manual/browse.rst b/doc/manual/gui/browse.rst
similarity index 81%
rename from doc/source/manual/browse.rst
rename to doc/manual/gui/browse.rst
index 8673354..692f440 100644
--- a/doc/source/manual/browse.rst
+++ b/doc/manual/gui/browse.rst
@@ -1,409 +1,36 @@
 
-
 .. _ch_browse:
 
-===================
-Processing raw data
-===================
+=====================================
+Browsing raw data with mne_browse_raw
+=====================================
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
 
 Overview
 ########
 
-The raw data processor mne_browse_raw is
+The raw data processor :ref:`mne_browse_raw` is
 designed for simple raw data viewing and processing operations. In
 addition, the program is capable of off-line averaging and estimation
-of covariance matrices. mne_browse_raw can
+of covariance matrices. :ref:`mne_browse_raw` can
 be also used to view averaged data in the topographical layout.
-Finally, mne_browse_raw can communicate
-with mne_analyze described in :ref:`ch_interactive_analysis` to
+Finally, :ref:`mne_browse_raw` can communicate
+with :ref:`mne_analyze` described in :ref:`ch_interactive_analysis` to
 calculate current estimates from raw data interactively.
 
-mne_browse_raw has also
-an alias, mne_process_raw . If mne_process_raw is
+:ref:`mne_browse_raw` also has
+an alias, :ref:`mne_process_raw`. If :ref:`mne_process_raw` is
 invoked, no user interface appears. Instead, command line options
 are used to specify the filtering parameters as well as averaging
 and covariance-matrix estimation command files for batch processing. This
-chapter discusses both mne_browse_raw and mne_process_raw .
-
-.. _CACHCFEG:
-
-Command-line options
-####################
-
-This section first describes the options common to mne_browse_raw and mne_process_raw .
-Thereafter, options unique to the interactive (mne_browse_raw)
-and batch (mne_process_raw) modes are
-listed.
-
-.. _BABBGJEA:
-
-Common options
-==============
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---cd <*dir*>**
-
-    Change to this directory before starting.
-
-**\---raw <*name*>**
-
-    Specifies the raw data file to be opened. This option is required
-    for batch version, mne_process_raw. If
-    a raw data file is not specified for the interactive version, mne_browse_raw ,
-    and empty interactive browser will open.
-
-**\---grad <*number*>**
-
-    Apply software gradient compensation of the given order to the data loaded
-    with the ``--raw`` option. This option is effective only
-    for data acquired with the CTF and 4D Magnes MEG systems. If orders
-    different from zero are requested for Neuromag data, an error message appears
-    and data are not loaded. Any compensation already existing in the
-    file can be undone or changed to another order by using an appropriate ``--grad`` options.
-    Possible orders are 0 (No compensation), 1 - 3 (CTF data), and 101
-    (Magnes data). The same compensation will be applied to all data
-    files loaded by mne_process_raw . For mne_browse_raw ,
-    this applies only to the data file loaded by specifying the ``--raw`` option.
-    For interactive data loading, the software gradient compensation
-    is specified in the corresponding file selection dialog, see :ref:`CACDCHAJ`.
-
-**\---filtersize <*size*>**
-
-    Adjust the length of the FFT to be applied in filtering. The number will
-    be rounded up to the next power of two. If the size is :math:`N`,
-    the corresponding length of time is :math:`N/f_s`,
-    where :math:`f_s` is the sampling frequency
-    of your data. The filtering procedure includes overlapping tapers
-    of length :math:`N/2` so that the total FFT
-    length will actually be :math:`2N`. This
-    value cannot be changed after the program has been started.
-
-**\---highpass <*value/Hz*>**
-
-    Highpass filter frequency limit. If this is too low with respect
-    to the selected FFT length and, the data will not be highpass filtered. It
-    is best to experiment with the interactive version to find the lowest applicable
-    filter for your data. This value can be adjusted in the interactive
-    version of the program. The default is 0, *i.e.*,
-    no highpass filter apart from that used during the acquisition will
-    be in effect.
-
-**\---highpassw <*value/Hz*>**
-
-    The width of the transition band of the highpass filter. The default
-    is 6 frequency bins, where one bin is :math:`f_s / (2N)`. This
-    value cannot be adjusted in the interactive version of the program.
-
-**\---lowpass <*value/Hz*>**
-
-    Lowpass filter frequency limit. This value can be adjusted in the interactive
-    version of the program. The default is 40 Hz.
-
-**\---lowpassw <*value/Hz*>**
-
-    The width of the transition band of the lowpass filter. This value
-    can be adjusted in the interactive version of the program. The default
-    is 5 Hz.
-
-**\---eoghighpass <*value/Hz*>**
-
-    Highpass filter frequency limit for EOG. If this is too low with respect
-    to the selected FFT length and, the data will not be highpass filtered.
-    It is best to experiment with the interactive version to find the
-    lowest applicable filter for your data. This value can be adjusted in
-    the interactive version of the program. The default is 0, *i.e.*,
-    no highpass filter apart from that used during the acquisition will
-    be in effect.
-
-**\---eoghighpassw <*value/Hz*>**
-
-    The width of the transition band of the EOG highpass filter. The default
-    is 6 frequency bins, where one bin is :math:`f_s / (2N)`.
-    This value cannot be adjusted in the interactive version of the
-    program.
-
-**\---eoglowpass <*value/Hz*>**
-
-    Lowpass filter frequency limit for EOG. This value can be adjusted in
-    the interactive version of the program. The default is 40 Hz.
-
-**\---eoglowpassw <*value/Hz*>**
-
-    The width of the transition band of the EOG lowpass filter. This value
-    can be adjusted in the interactive version of the program. The default
-    is 5 Hz.
-
-**\---filteroff**
-
-    Do not filter the data. This initial value can be changed in the
-    interactive version of the program.
-
-**\---digtrig <*name*>**
-
-    Name of the composite digital trigger channel. The default value
-    is 'STI 014'. Underscores in the channel name
-    will be replaced by spaces.
-
-**\---digtrigmask <*number*>**
-
-    Mask to be applied to the trigger channel values before considering them.
-    This option is useful if one wants to set some bits in a don't care
-    state. For example, some finger response pads keep the trigger lines
-    high if not in use, *i.e.*, a finger is not in
-    place. Yet, it is convenient to keep these devices permanently connected
-    to the acquisition system. The number can be given in decimal or
-    hexadecimal format (beginning with 0x or 0X). For example, the value
-    255 (0xFF) means that only the lowest order byte (usually trigger
-    lines 1 - 8 or bits 0 - 7) will be considered.
-
-.. note:: Multiple raw data files can be specified for mne_process_raw .
-
-.. note:: Strictly speaking, trigger mask value zero would    mean that all trigger inputs are ignored. However, for convenience,    setting the mask to zero or not setting it at all has the same effect    as 0xFFFFFFFF, *i.e.*, all bits set.
-
-.. note:: The digital trigger channel can also be set with    the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable    value will *not* be replaced with spaces by mne_browse_raw or mne_process_raw .    Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME    environment variable.
-
-.. note:: The digital trigger channel mask can also be    set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option    supersedes the MNE_TRIGGER_CH_MASK environment variable.
-
-.. _CACCHAGA:
-
-Interactive mode options
-========================
-
-These options apply to the interactive (mne_browse_raw)
-version only.
-
-**\---allowmaxshield**
-
-    Allow loading of unprocessed Elekta-Neuromag data with MaxShield
-    on. These kind of data should never be used for source localization
-    without further processing with Elekta-Neuromag software.
-
-**\---deriv <*name*>**
-
-    Specifies the name of a derivation file. This overrides the use
-    of a standard derivation file, see :ref:`CACFHAFH`.
-
-**\---sel <*name*>**
-
-    Specifies the channel selection file to be used. This overrides
-    the use of the standard channel selection files, see :ref:`CACCJEJD`.
-
-.. _CACFAAAJ:
+chapter discusses both :ref:`mne_browse_raw` and :ref:`mne_process_raw`.
 
-Batch-mode options
-==================
-
-These options apply to the batch-mode version, mne_process_raw only.
-
-**\---proj <*name*>**
-
-    Specify the name of the file of the file containing a signal-space
-    projection (SSP) operator. If ``--proj`` options are present
-    the data file is not consulted for an SSP operator. The operator
-    corresponding to average EEG reference is always added if EEG data
-    are present.
-
-**\---projon**
-
-    Activate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
-    be present on the mne_processs_raw command line.
-
-**\---projoff**
-
-    Deactivate the projections loaded. One of the options ``--projon`` or ``--projoff`` must
-    be present on the mne_processs_raw command line.
-
-**\---makeproj**
-
-    Estimate the noise subspace from the data and create a new signal-space
-    projection operator instead of using one attached to the data file
-    or those specified with the ``--proj`` option. The following
-    eight options define the parameters of the noise subspace estimation. More
-    information on the signal-space projection can be found in :ref:`CACCHABI`.
-
-**\---projevent <*no*>**
-
-    Specifies the events which identify the time points of interest
-    for projector calculation. When this option is present, ``--projtmin`` and ``--projtmax`` are
-    relative to the time point of the event rather than the whole raw
-    data file.
-
-**\---projtmin <*time/s*>**
-
-    Specify the beginning time for the calculation of the covariance matrix
-    which serves as the basis for the new SSP operator. This option
-    is required with ``--projevent`` and defaults to the beginning
-    of the raw data file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
-    are present.
-
-**\---projtmax <*time/s*>**
-
-    Specify the ending time for the calculation of the covariance matrix which
-    serves as the basis for the new SSP operator. This option is required
-    with ``--projevent`` and defaults to the end of the raw data
-    file otherwise. This option is effective only if ``--makeproj`` or ``--saveprojtag`` options
-    are present.
-
-**\---projngrad <*number*>**
-
-    Number of SSP components to include for planar gradiometers (default
-    = 5). This value is system dependent. For example, in a well-shielded
-    quiet environment, no planar gradiometer projections are usually
-    needed.
-
-**\---projnmag <*number*>**
-
-    Number of SSP components to include for magnetometers / axial gradiometers
-    (default = 8). This value is system dependent. For example, in a
-    well-shielded quiet environment, 3 - 4 components are need
-    while in a noisy environment with light shielding even more than
-    8 components may be necessary.
-
-**\---projgradrej <*value/ fT/cm*>**
-
-    Rejection limit for planar gradiometers in the estimation of the covariance
-    matrix frfixom which the new SSP operator is derived. The default
-    value is 2000 fT/cm. Again, this value is system dependent.
-
-**\---projmagrej <*value/ fT*>**
-
-    Rejection limit for planar gradiometers in the estimation of the covariance
-    matrix from which the new SSP operator is derived. The default value
-    is 3000 fT. Again, this value is system dependent.
-
-**\---saveprojtag <*tag*>**
-
-    This option defines the names of files to hold the SSP operator.
-    If this option is present the ``--makeproj`` option is
-    implied. The SSP operator file name is formed by removing the trailing ``.fif`` or ``_raw.fif`` from
-    the raw data file name by appending  <*tag*> .fif
-    to this stem. Recommended value for <*tag*> is ``-proj`` .
-
-**\---saveprojaug**
-
-    Specify this option if you want to use the projection operator file output
-    in the Elekta-Neuromag Signal processor (graph) software.
-
-**\---eventsout <*name*>**
-
-    List the digital trigger channel events to the specified file. By default,
-    only transitions from zero to a non-zero value are listed. If multiple
-    raw data files are specified, an equal number of ``--eventsout`` options
-    should be present. If the file name ends with .fif, the output will
-    be in fif format, otherwise a text event file will be output.
-
-**\---allevents**
+See the command-line documentation of :ref:`mne_browse_raw` and :ref:`mne_process_raw`.
 
-    List all transitions to file specified with the ``--eventsout`` option.
-
-**\---events <*name*>**
-
-    Specifies the name of a fif or text format event file (see :ref:`CACBCEGC`) to be associated with a raw data file to be
-    processed. If multiple raw data files are specified, the number
-    of ``--events`` options can be smaller or equal to the
-    number of raw data files. If it is equal, the event filenames will
-    be associated with the raw data files in the order given. If it
-    is smaller, the remaining raw data files for which an event file
-    is not specified will *not* have an event file associated
-    with them. The event file format is recognized from the file name:
-    if it ends with ``.fif`` , the file is assumed to be in
-    fif format, otherwise a text file is expected.
-
-**\---ave <*name*>**
-
-    Specifies the name of an off-line averaging description file. For details
-    of the format of this file, please consult :ref:`CACBBDGC`.
-    If multiple raw data files are specified, the number of ``--ave`` options
-    can be smaller or equal to the number of raw data files. If it is
-    equal, the averaging description file names will be associated with
-    the raw data files in the order given. If it is smaller, the last
-    description file will be used for the remaining raw data files.
-
-**\---saveavetag <*tag*>**
-
-    If this option is present and averaging is evoked with the ``--ave`` option,
-    the outfile and logfile options in the averaging description file
-    are ignored. Instead, trailing ``.fif`` or ``_raw.fif`` is
-    removed from the raw data file name and <*tag*> ``.fif`` or <*tag*> ``.log`` is appended
-    to create the output and log file names, respectively.
-
-**\---gave <*name*>**
-
-    If multiple raw data files are specified as input and averaging
-    is requested, the grand average over all data files will be saved
-    to <*name*> .
-
-**\---cov <*name*>**
-
-    Specify the name of a description file for covariance matrix estimation. For
-    details of the format of this file, please see :ref:`CACEBACG`.
-    If multiple raw data files are specified, the number of ``--cov`` options can
-    be smaller or equal to the number of raw data files. If it is equal, the
-    averaging description file names will be associated with the raw data
-    files in the order given. If it is smaller, the last description
-    file will be used for the remaining raw data files.
-
-**\---savecovtag <*tag*>**
-
-    If this option is present and covariance matrix estimation is evoked with
-    the ``--cov`` option, the outfile and logfile options in
-    the covariance estimation description file are ignored. Instead,
-    trailing ``.fif`` or ``_raw.fif`` is removed from
-    the raw data file name and <*tag*> .fif or <*tag*> .log
-    is appended to create the output and log file names, respectively.
-    For compatibility with other MNE software scripts, ``--savecovtag -cov`` is recommended.
-
-**\---savehere**
-
-    If the ``--saveavetag`` and ``--savecovtag`` options
-    are used to generate the file output file names, the resulting files
-    will go to the same directory as raw data by default. With this
-    option the output files will be generated in the current working
-    directory instead.
-
-**\---gcov <*name*>**
-
-    If multiple raw data files are specified as input and covariance matrix estimation
-    is requested, the grand average over all data files will be saved
-    to <*name*> . The details of
-    the covariance matrix estimation are given in :ref:`CACHAAEG`.
-
-**\---save <*name*>**
-
-    Save a filtered and optionally down-sampled version of the data
-    file to <*name*> . If multiple
-    raw data files are specified, an equal number of ``--save`` options
-    should be present. If <*filename*> ends
-    with ``.fif`` or ``_raw.fif`` , these endings are
-    deleted. After these modifications, ``_raw.fif`` is inserted
-    after the remaining part of the file name. If the file is split
-    into multiple parts (see ``--split`` option below), the
-    additional parts will be called <*name*> ``-`` <*number*> ``_raw.fif``
-
-**\---split <*size/MB*>**
-
-    Specifies the maximum size of the raw data files saved with the ``--save`` option.
-    By default, the output is split into files which are just below
-    2 GB so that the fif file maximum size is not exceed.
-
-**\---anon**
-
-    Do not include any subject information in the output files created with
-    the ``--save`` option.
-
-**\---decim <*number*>**
-
-    The data are decimated by this factor before saving to the file
-    specified with the ``--save`` option. For decimation to
-    succeed, the data must be lowpass filtered to less than third of
-    the sampling frequency effective after decimation.
 
 The user interface
 ##################
@@ -510,7 +137,7 @@ deleted. After these modifications, ``_raw.fif`` is inserted
 after the remaining part of the file name. If the file is split
 into multiple parts, the additional parts will be called <*name*> ``-`` <*number*> ``_raw.fif`` .
 For downsampling and saving options in mne_process_raw ,
-see :ref:`CACFAAAJ`.
+see :ref:`mne_process_raw`.
 
 Change working directory
 ========================
@@ -588,7 +215,7 @@ Load derivations
 
 This menu choice allows loading of channel derivation data
 files created with the mne_make_derivations utility,
-see :ref:`CHDHJABJ`, or using the interactive derivations
+see :ref:`mne_make_derivations`, or using the interactive derivations
 editor in mne_browse_raw , see :ref:`CACJIEHI`, Most common use of derivations is to calculate
 differences between EEG channels, *i.e.*, bipolar
 EEG data. Since any number of channels can be included in a derivation
@@ -618,7 +245,7 @@ channels cannot be displayed in topographical data displays. Derived
 channels are not included in averages or noise covariance matrix
 estimation.
 
-.. note:: If the file ``$HOME/.mne/mne_browse_raw-deriv.fif`` exists and    contains derivation data, it is loaded automatically when mne_browse_raw starts    unless the ``--deriv`` option has been used to specify    a nonstandard derivation file, see :ref:`CACCHAGA`.
+.. note:: If the file ``$HOME/.mne/mne_browse_raw-deriv.fif`` exists and contains derivation data, it is loaded automatically when mne_browse_raw starts unless the ``--deriv`` option has been used to specify a nonstandard derivation file, see :ref:`mne_browse_raw`.
 
 Save derivations
 ================
@@ -680,7 +307,7 @@ The items in the dialog have the following functions:
 
     The half-amplitude point of the highpass filter. The width of the transition
     from zero to one can be specified with the ``--highpassw`` command-line
-    option, see :ref:`CACHCFEG`. Lowest feasible highpass value
+    option, see :ref:`mne_browse_raw`. Lowest feasible highpass value
     is constrained by the length of the filter and sampling frequency.
     You will be informed when you press OK or Apply if
     the selected highpass could not be realized. The default value zero means
@@ -860,7 +487,7 @@ Brings up the interactive derivations editor. This editor
 can be used to add or modify derived channels, *i.e.*,
 linear combinations of signals actually recorded. Channel derivations
 can be also created and modified using the mne_make_derivations tool,
-see :ref:`CHDHJABJ`. The interactive editor contains two main
+see :ref:`mne_make_derivations`. The interactive editor contains two main
 areas:
 
 - Interactive tools for specifying a channel
@@ -886,7 +513,7 @@ associated selections interactively involves the following steps:
 
 - If desired, EEG channels can be relabeled
   with descriptive names using the mne_rename_channels utility,
-  see :ref:`CHDCFEAJ`. It is strongly recommended that you
+  see :ref:`mne_rename_channels`. It is strongly recommended that you
   keep a copy of the channel alias file used by mne_rename_channels .
   If necessary, you can then easily return to the original channel
   names by running mne_rename_channels again
@@ -1070,7 +697,7 @@ for the Neuromag Vectorview data because newer systems do not have
 spaces in the channel names like the original Vectorview systems
 did.
 
-.. note:: The mne_make_eeg_layout utility    can be employed to create a layout file matching the positioning    of EEG electrodes, see :ref:`CHDDGDJA`.
+.. note:: The mne_make_eeg_layout utility    can be employed to create a layout file matching the positioning    of EEG electrodes, see :ref:`mne_make_eeg_layout`.
 
 .. _CACDDIDH:
 
@@ -1290,7 +917,7 @@ is introduced. More information on the SSP method can be found in :ref:`CACCHABI
 
 .. note:: The new projection data created in mne_browse_raw is    not automatically copied to the data file. You need to create a    standalone projection file from File/Save projection... to    save the new projection data and load it manually after the data    file has been loaded if you want to include in any subsequent analysis.
 
-.. note:: The command-line options for mne_process_raw allow    calculation of the SSP operator from continuous data in the batch    mode, see :ref:`CACFAAAJ`.
+.. note:: The command-line options for mne_process_raw allow    calculation of the SSP operator from continuous data in the batch    mode, see :ref:`mne_process_raw`.
 
 .. _BABDJGGJ:
 
@@ -1499,7 +1126,8 @@ interesting time points in the data. When a raw data file is opened,
 a standard event file is consulted for the list of events. If this
 file is not present, the digital trigger channel, defined by the --digtrig option
 or the ``MNE_TRIGGER_CH_NAME`` environment variable is
-scanned for events. For more information, see :ref:`BABBGJEA` and :ref:`CACDCHAJ`.
+scanned for events. For more information, see the command-line references
+for :ref:`mne_browse_raw` and :ref:`mne_process_raw`.
 
 In addition to the events detected on the trigger channel,
 it is possible to associate user-defined events to the data, either
@@ -1614,9 +1242,8 @@ The directory in which the raw data file resides now contains
 an annotation file which will be automatically loaded each time
 the data file is opened. A text format event file suitable for this
 purpose can be created manually, extracted from an EDF+ file using
-the ``--tal`` option in mne_edf2fiff discussed
-in :ref:`BABHDBBD`, or produced by custom software used during
-data acquisition.
+the ``--tal`` option in :ref:`mne_edf2fiff`, or produced by custom
+software used during data acquisition.
 
 .. _BABCIGGH:
 
@@ -1779,7 +1406,7 @@ layouts reside in ``$MNE_ROOT/share/mne/mne_analyze/lout`` .
 In addition any layout files residing in ``$HOME/.mne/lout`` are listed.
 The format of the layout files is the same as for the Neuromag programs xplotter and xfit .
 A custom EEG layout can be easily created with the mne_make_eeg_layout utility,
-see :ref:`CHDDGDJA`.
+see :ref:`mne_make_eeg_layout`.
 
 Several actions can be performed with the mouse in the topographical data
 display:
@@ -2419,7 +2046,8 @@ leak through because :math:`P_{\perp}b_n(t) \neq 0`. If the any
 of the brain signal vectors :math:`b_s(t)` is
 close to the noise subspace not only the noise but also the signal
 will be attenuated by the application of :math:`P_{\perp}` and,
-consequently, there might by little gain in signal-to-noise ratio. :ref:`CACFGIEC` demonstrates the effect of SSP on the Vectorview
+consequently, there might be little gain in signal-to-noise ratio.
+:ref:`CACFGIEC` demonstrates the effect of SSP on the Vectorview
 magnetometer data. After the elimination of a three-dimensional
 noise subspace, the absolute value of the noise is dampened approximately
 by a factor of 10 and the covariance matrix becomes diagonally dominant.
@@ -2431,29 +2059,6 @@ is accomplished by mne_inverse_operator as
 described in :ref:`CBBDDBGF`. For more information on SSP,
 please consult the references listed in :ref:`CEGIEEBB`.
 
-.. _CACFGIEC:
-
-.. figure:: pics/proj-off-on.png
-    :alt: example of the effect of SSP
-
-    An example of the effect of SSP
-    
-    The covariance matrix :math:`C_n` of noise data on the 102 Vectorview magnetometers was computed (a) before and (b) after the application of SSP with three-dimensional noise subspace. The plotted quantity is :math:`\sqrt {|(C_n)_{jk}|}`. Note that the vertical scale in (b) is ten times smaller than in (a).
-
-.. _BABFFCHF:
-
-Estimation of the noise subspace
-================================
-
-As described above, application of SSP requires the estimation
-of the signal vectors :math:`b_1 \dotso b_m` constituting
-the noise subspace. The most common approach, also implemented in mne_browse_raw is
-to compute a covariance matrix of empty room data, compute its eigenvalue
-decomposition, and employ the eigenvectors corresponding to the
-highest eigenvalues as basis for the noise subspace. It is also
-customary to use a separate set of vectors for magnetometers and
-gradiometers in the Vectorview system.
-
 EEG average electrode reference
 ===============================
 
diff --git a/doc/source/manual/mne_analyze/MNE_preferences.png b/doc/manual/gui/mne_analyze/MNE_preferences.png
similarity index 100%
rename from doc/source/manual/mne_analyze/MNE_preferences.png
rename to doc/manual/gui/mne_analyze/MNE_preferences.png
diff --git a/doc/source/manual/mne_analyze/adjust_alignment.png b/doc/manual/gui/mne_analyze/adjust_alignment.png
similarity index 100%
rename from doc/source/manual/mne_analyze/adjust_alignment.png
rename to doc/manual/gui/mne_analyze/adjust_alignment.png
diff --git a/doc/source/manual/mne_analyze/adjust_lights.png b/doc/manual/gui/mne_analyze/adjust_lights.png
similarity index 100%
rename from doc/source/manual/mne_analyze/adjust_lights.png
rename to doc/manual/gui/mne_analyze/adjust_lights.png
diff --git a/doc/source/manual/mne_analyze/adjust_menu.png b/doc/manual/gui/mne_analyze/adjust_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/adjust_menu.png
rename to doc/manual/gui/mne_analyze/adjust_menu.png
diff --git a/doc/source/manual/mne_analyze/cont_hpi_data.png b/doc/manual/gui/mne_analyze/cont_hpi_data.png
similarity index 100%
rename from doc/source/manual/mne_analyze/cont_hpi_data.png
rename to doc/manual/gui/mne_analyze/cont_hpi_data.png
diff --git a/doc/source/manual/mne_analyze/dipole_list.png b/doc/manual/gui/mne_analyze/dipole_list.png
similarity index 100%
rename from doc/source/manual/mne_analyze/dipole_list.png
rename to doc/manual/gui/mne_analyze/dipole_list.png
diff --git a/doc/source/manual/mne_analyze/dipole_parameters.png b/doc/manual/gui/mne_analyze/dipole_parameters.png
similarity index 100%
rename from doc/source/manual/mne_analyze/dipole_parameters.png
rename to doc/manual/gui/mne_analyze/dipole_parameters.png
diff --git a/doc/source/manual/mne_analyze/dipoles_menu.png b/doc/manual/gui/mne_analyze/dipoles_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/dipoles_menu.png
rename to doc/manual/gui/mne_analyze/dipoles_menu.png
diff --git a/doc/source/manual/mne_analyze/epoch_selector.png b/doc/manual/gui/mne_analyze/epoch_selector.png
similarity index 100%
rename from doc/source/manual/mne_analyze/epoch_selector.png
rename to doc/manual/gui/mne_analyze/epoch_selector.png
diff --git a/doc/source/manual/mne_analyze/field_mapping_pref.png b/doc/manual/gui/mne_analyze/field_mapping_pref.png
similarity index 100%
rename from doc/source/manual/mne_analyze/field_mapping_pref.png
rename to doc/manual/gui/mne_analyze/field_mapping_pref.png
diff --git a/doc/source/manual/mne_analyze/file_menu.png b/doc/manual/gui/mne_analyze/file_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/file_menu.png
rename to doc/manual/gui/mne_analyze/file_menu.png
diff --git a/doc/source/manual/mne_analyze/hardcopy_controls.png b/doc/manual/gui/mne_analyze/hardcopy_controls.png
similarity index 100%
rename from doc/source/manual/mne_analyze/hardcopy_controls.png
rename to doc/manual/gui/mne_analyze/hardcopy_controls.png
diff --git a/doc/source/manual/mne_analyze/help_menu.png b/doc/manual/gui/mne_analyze/help_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/help_menu.png
rename to doc/manual/gui/mne_analyze/help_menu.png
diff --git a/doc/source/manual/mne_analyze/image_dialog.png b/doc/manual/gui/mne_analyze/image_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/image_dialog.png
rename to doc/manual/gui/mne_analyze/image_dialog.png
diff --git a/doc/source/manual/mne_analyze/label_list.png b/doc/manual/gui/mne_analyze/label_list.png
similarity index 100%
rename from doc/source/manual/mne_analyze/label_list.png
rename to doc/manual/gui/mne_analyze/label_list.png
diff --git a/doc/source/manual/mne_analyze/labels_menu.png b/doc/manual/gui/mne_analyze/labels_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/labels_menu.png
rename to doc/manual/gui/mne_analyze/labels_menu.png
diff --git a/doc/source/manual/mne_analyze/main_window.png b/doc/manual/gui/mne_analyze/main_window.png
similarity index 100%
rename from doc/source/manual/mne_analyze/main_window.png
rename to doc/manual/gui/mne_analyze/main_window.png
diff --git a/doc/source/manual/mne_analyze/movie_dialog.png b/doc/manual/gui/mne_analyze/movie_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/movie_dialog.png
rename to doc/manual/gui/mne_analyze/movie_dialog.png
diff --git a/doc/source/manual/mne_analyze/mri_viewer.png b/doc/manual/gui/mne_analyze/mri_viewer.png
similarity index 100%
rename from doc/source/manual/mne_analyze/mri_viewer.png
rename to doc/manual/gui/mne_analyze/mri_viewer.png
diff --git a/doc/source/manual/mne_analyze/open_dialog.png b/doc/manual/gui/mne_analyze/open_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/open_dialog.png
rename to doc/manual/gui/mne_analyze/open_dialog.png
diff --git a/doc/source/manual/mne_analyze/overlay_management.png b/doc/manual/gui/mne_analyze/overlay_management.png
similarity index 100%
rename from doc/source/manual/mne_analyze/overlay_management.png
rename to doc/manual/gui/mne_analyze/overlay_management.png
diff --git a/doc/source/manual/mne_analyze/patch_selection_dialog.png b/doc/manual/gui/mne_analyze/patch_selection_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/patch_selection_dialog.png
rename to doc/manual/gui/mne_analyze/patch_selection_dialog.png
diff --git a/doc/source/manual/mne_analyze/save_label_timecourse.png b/doc/manual/gui/mne_analyze/save_label_timecourse.png
similarity index 100%
rename from doc/source/manual/mne_analyze/save_label_timecourse.png
rename to doc/manual/gui/mne_analyze/save_label_timecourse.png
diff --git a/doc/source/manual/mne_analyze/scales_dialog.png b/doc/manual/gui/mne_analyze/scales_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/scales_dialog.png
rename to doc/manual/gui/mne_analyze/scales_dialog.png
diff --git a/doc/source/manual/mne_analyze/surface_controls.png b/doc/manual/gui/mne_analyze/surface_controls.png
similarity index 100%
rename from doc/source/manual/mne_analyze/surface_controls.png
rename to doc/manual/gui/mne_analyze/surface_controls.png
diff --git a/doc/source/manual/mne_analyze/surface_selection_dialog.png b/doc/manual/gui/mne_analyze/surface_selection_dialog.png
similarity index 100%
rename from doc/source/manual/mne_analyze/surface_selection_dialog.png
rename to doc/manual/gui/mne_analyze/surface_selection_dialog.png
diff --git a/doc/source/manual/mne_analyze/timecourse_manager.png b/doc/manual/gui/mne_analyze/timecourse_manager.png
similarity index 100%
rename from doc/source/manual/mne_analyze/timecourse_manager.png
rename to doc/manual/gui/mne_analyze/timecourse_manager.png
diff --git a/doc/source/manual/mne_analyze/view_menu.png b/doc/manual/gui/mne_analyze/view_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/view_menu.png
rename to doc/manual/gui/mne_analyze/view_menu.png
diff --git a/doc/source/manual/mne_analyze/viewer.png b/doc/manual/gui/mne_analyze/viewer.png
similarity index 100%
rename from doc/source/manual/mne_analyze/viewer.png
rename to doc/manual/gui/mne_analyze/viewer.png
diff --git a/doc/source/manual/mne_analyze/viewer_options.png b/doc/manual/gui/mne_analyze/viewer_options.png
similarity index 100%
rename from doc/source/manual/mne_analyze/viewer_options.png
rename to doc/manual/gui/mne_analyze/viewer_options.png
diff --git a/doc/source/manual/mne_analyze/visualize_hpi.png b/doc/manual/gui/mne_analyze/visualize_hpi.png
similarity index 100%
rename from doc/source/manual/mne_analyze/visualize_hpi.png
rename to doc/manual/gui/mne_analyze/visualize_hpi.png
diff --git a/doc/source/manual/mne_analyze/windows_menu.png b/doc/manual/gui/mne_analyze/windows_menu.png
similarity index 100%
rename from doc/source/manual/mne_analyze/windows_menu.png
rename to doc/manual/gui/mne_analyze/windows_menu.png
diff --git a/doc/source/manual/mne_browse_raw/adjust_menu.png b/doc/manual/gui/mne_browse_raw/adjust_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/adjust_menu.png
rename to doc/manual/gui/mne_browse_raw/adjust_menu.png
diff --git a/doc/source/manual/mne_browse_raw/adust_menu.png b/doc/manual/gui/mne_browse_raw/adust_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/adust_menu.png
rename to doc/manual/gui/mne_browse_raw/adust_menu.png
diff --git a/doc/source/manual/mne_browse_raw/average_pref.png b/doc/manual/gui/mne_browse_raw/average_pref.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/average_pref.png
rename to doc/manual/gui/mne_browse_raw/average_pref.png
diff --git a/doc/source/manual/mne_browse_raw/channel_selection.png b/doc/manual/gui/mne_browse_raw/channel_selection.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/channel_selection.png
rename to doc/manual/gui/mne_browse_raw/channel_selection.png
diff --git a/doc/source/manual/mne_browse_raw/file_menu.png b/doc/manual/gui/mne_browse_raw/file_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/file_menu.png
rename to doc/manual/gui/mne_browse_raw/file_menu.png
diff --git a/doc/source/manual/mne_browse_raw/filter_dialog.png b/doc/manual/gui/mne_browse_raw/filter_dialog.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/filter_dialog.png
rename to doc/manual/gui/mne_browse_raw/filter_dialog.png
diff --git a/doc/source/manual/mne_browse_raw/help_menu.png b/doc/manual/gui/mne_browse_raw/help_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/help_menu.png
rename to doc/manual/gui/mne_browse_raw/help_menu.png
diff --git a/doc/source/manual/mne_browse_raw/main.png b/doc/manual/gui/mne_browse_raw/main.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/main.png
rename to doc/manual/gui/mne_browse_raw/main.png
diff --git a/doc/source/manual/mne_browse_raw/manage_averages_dialog.png b/doc/manual/gui/mne_browse_raw/manage_averages_dialog.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/manage_averages_dialog.png
rename to doc/manual/gui/mne_browse_raw/manage_averages_dialog.png
diff --git a/doc/source/manual/mne_browse_raw/new_selection.png b/doc/manual/gui/mne_browse_raw/new_selection.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/new_selection.png
rename to doc/manual/gui/mne_browse_raw/new_selection.png
diff --git a/doc/source/manual/mne_browse_raw/new_ssp.png b/doc/manual/gui/mne_browse_raw/new_ssp.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/new_ssp.png
rename to doc/manual/gui/mne_browse_raw/new_ssp.png
diff --git a/doc/source/manual/mne_browse_raw/open_dialog copy.png b/doc/manual/gui/mne_browse_raw/open_dialog copy.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/open_dialog copy.png
rename to doc/manual/gui/mne_browse_raw/open_dialog copy.png
diff --git a/doc/source/manual/mne_browse_raw/open_dialog.png b/doc/manual/gui/mne_browse_raw/open_dialog.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/open_dialog.png
rename to doc/manual/gui/mne_browse_raw/open_dialog.png
diff --git a/doc/source/manual/mne_browse_raw/process_menu.png b/doc/manual/gui/mne_browse_raw/process_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/process_menu.png
rename to doc/manual/gui/mne_browse_raw/process_menu.png
diff --git a/doc/source/manual/mne_browse_raw/process_menu2.png b/doc/manual/gui/mne_browse_raw/process_menu2.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/process_menu2.png
rename to doc/manual/gui/mne_browse_raw/process_menu2.png
diff --git a/doc/source/manual/mne_browse_raw/scales_dialog.png b/doc/manual/gui/mne_browse_raw/scales_dialog.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/scales_dialog.png
rename to doc/manual/gui/mne_browse_raw/scales_dialog.png
diff --git a/doc/source/manual/mne_browse_raw/scales_dialog2.png b/doc/manual/gui/mne_browse_raw/scales_dialog2.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/scales_dialog2.png
rename to doc/manual/gui/mne_browse_raw/scales_dialog2.png
diff --git a/doc/source/manual/mne_browse_raw/toolbar.png b/doc/manual/gui/mne_browse_raw/toolbar.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/toolbar.png
rename to doc/manual/gui/mne_browse_raw/toolbar.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-0.png b/doc/manual/gui/mne_browse_raw/windows_menu-0.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-0.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-0.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-1.png b/doc/manual/gui/mne_browse_raw/windows_menu-1.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-1.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-1.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-10.png b/doc/manual/gui/mne_browse_raw/windows_menu-10.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-10.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-10.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-11.png b/doc/manual/gui/mne_browse_raw/windows_menu-11.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-11.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-11.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-12.png b/doc/manual/gui/mne_browse_raw/windows_menu-12.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-12.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-12.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-13.png b/doc/manual/gui/mne_browse_raw/windows_menu-13.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-13.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-13.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-14.png b/doc/manual/gui/mne_browse_raw/windows_menu-14.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-14.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-14.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-15.png b/doc/manual/gui/mne_browse_raw/windows_menu-15.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-15.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-15.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-16.png b/doc/manual/gui/mne_browse_raw/windows_menu-16.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-16.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-16.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-17.png b/doc/manual/gui/mne_browse_raw/windows_menu-17.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-17.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-17.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-2.png b/doc/manual/gui/mne_browse_raw/windows_menu-2.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-2.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-2.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-3.png b/doc/manual/gui/mne_browse_raw/windows_menu-3.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-3.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-3.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-4.png b/doc/manual/gui/mne_browse_raw/windows_menu-4.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-4.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-4.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-5.png b/doc/manual/gui/mne_browse_raw/windows_menu-5.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-5.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-5.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-6.png b/doc/manual/gui/mne_browse_raw/windows_menu-6.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-6.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-6.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-7.png b/doc/manual/gui/mne_browse_raw/windows_menu-7.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-7.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-7.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-8.png b/doc/manual/gui/mne_browse_raw/windows_menu-8.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-8.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-8.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu-9.png b/doc/manual/gui/mne_browse_raw/windows_menu-9.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu-9.png
rename to doc/manual/gui/mne_browse_raw/windows_menu-9.png
diff --git a/doc/source/manual/mne_browse_raw/windows_menu.png b/doc/manual/gui/mne_browse_raw/windows_menu.png
similarity index 100%
rename from doc/source/manual/mne_browse_raw/windows_menu.png
rename to doc/manual/gui/mne_browse_raw/windows_menu.png
diff --git a/doc/manual/index.rst b/doc/manual/index.rst
new file mode 100644
index 0000000..4217a0c
--- /dev/null
+++ b/doc/manual/index.rst
@@ -0,0 +1,142 @@
+.. _manual:
+
+Manual
+======
+
+If you are new to MNE, consider first reading the :ref:`cookbook`, as it
+gives some simple steps for starting with analysis. The other sections provide
+more in-depth information about how to use the software.
+You can also jump to the :ref:`api_reference` for specific Python function
+and class usage information.
+
+.. contents:: Contents
+   :local:
+   :depth: 1
+
+
+Cookbook
+--------
+
+A quick run-through of the basic steps involved in M/EEG source analysis.
+
+.. toctree::
+   :maxdepth: 2
+
+   cookbook
+
+Reading your data
+-----------------
+
+How to get your raw data loaded in MNE.
+
+.. toctree::
+   :maxdepth: 2
+
+   io
+
+Preprocessing
+-------------
+
+Dealing with artifacts and noise sources in data.
+
+.. toctree::
+   :maxdepth: 1
+
+   preprocessing/overview
+   preprocessing/bads
+   preprocessing/filter
+   preprocessing/ica
+   preprocessing/ssp
+
+Source localization
+-------------------
+
+Projecting raw data into source (brain) space.
+
+.. toctree::
+   :maxdepth: 1
+
+   source_localization/anatomy
+   source_localization/forward
+   source_localization/covariance
+   source_localization/inverse
+   source_localization/morph
+
+Time frequency analysis
+-----------------------
+
+Decomposing time-domain signals into time-frequency representations.
+
+.. toctree::
+   :maxdepth: 2
+
+   time_frequency
+
+Statistics
+----------
+
+Using parametric and non-parametric tests with M/EEG data.
+
+.. toctree::
+   :maxdepth: 2
+
+   statistics
+
+Visualization
+-------------
+
+Various tools and techniques for getting a handle on your data.
+
+.. toctree::
+   :maxdepth: 2
+
+   visualization
+
+Datasets
+--------
+
+Some of the datasets made available to MNE users.
+
+.. toctree::
+   :maxdepth: 1
+
+   datasets
+
+C tools
+-------
+
+Additional information about various MNE-C tools.
+
+.. toctree::
+   :maxdepth: 1
+
+   c_reference
+   gui/analyze
+   gui/browse
+   c_cookbook
+
+
+MATLAB tools
+------------
+
+Information about the MATLAB toolbox.
+
+.. toctree::
+   :maxdepth: 2
+
+   matlab
+
+Appendices
+----------
+
+More details about our implementations and software.
+
+.. toctree::
+   :maxdepth: 1
+
+   appendix/overview
+   appendix/bem_model
+   appendix/martinos
+   appendix/c_misc
+   appendix/c_release_notes
+   appendix/c_EULA
diff --git a/doc/manual/io.rst b/doc/manual/io.rst
new file mode 100644
index 0000000..b6b805a
--- /dev/null
+++ b/doc/manual/io.rst
@@ -0,0 +1,343 @@
+
+.. _ch_convert:
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+Here we describe the data reading and conversion utilities included
+with the MNE software.
+
+.. note::
+    All IO functions in MNE-Python that read or convert MEG and
+    EEG data can be found in :mod:`mne.io` and start with `read_raw_`. All
+    supported data formats can be read in MNE-Python directly without first
+    saving them to fif.
+
+Importing MEG data
+##################
+
+This section describes reading and converting of various MEG data formats.
+
+
+Elekta NeuroMag (.fif)
+======================
+
+Neuromag Raw FIF files can be loaded using :func:`mne.io.read_raw_fif`.
+
+.. note::
+    If the data were recorded with MaxShield on and have not been processed
+    with MaxFilter, they may need to be loaded with
+    ``mne.io.read_raw_fif(..., allow_maxshield=True)``.
+
+.. note::
+    This file format also supports EEG data. An average reference will be added
+    by default on reading EEG data. To change this behavior call the readers
+    like this: ``mne.io.read_raw_fif(..., add_eeg_ref=False)``
+
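+As a minimal sketch (the file name here is hypothetical), loading a raw
+FIF file looks like:
+
+.. code:: python
+
+    import mne
+
+    # preload=True reads the data into memory rather than leaving it on disk
+    raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
+    print(raw.info)  # channels, sampling rate, and other metadata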
+
+Importing 4-D Neuroimaging / BTI data
+=====================================
+
+MNE-Python includes :func:`mne.io.read_raw_bti` to read and convert 4D / BTI data.
+This reader function will by default replace the original channel names,
+typically composed of the letter `A` and the channel number, with
+Neuromag-style names. To import the data, the following input files are
+mandatory:
+
+- A data file (typically c,rfDC)
+  containing the recorded MEG time-series.
+
+- A hs_file
+  containing the digitizer data.
+
+- A config file
+  containing acquisition information and metadata.
+
+By default :func:`mne.io.read_raw_bti` assumes these three files to be located
+in the same folder.
+
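+A minimal sketch, assuming the conventional 4D file names and that all
+three files live in the working directory:
+
+.. code:: python
+
+    import mne
+
+    # the file names below are the 4D/BTI defaults
+    raw = mne.io.read_raw_bti('c,rfDC', config_fname='config',
+                              head_shape_fname='hs_file')
+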
+.. note:: The reference and compensation channels are read, but the
+          compensation weights are currently not processed.
+          As a result, the :class:`mne.io.Raw` object and the corresponding fif
+          file do not include information about the compensation channels
+          and the weights to be applied to realize software gradient
+          compensation. To augment the Magnes fif files with the necessary
+          information, the command line tools include the utilities
+          :ref:`mne_create_comp_data` and :ref:`mne_add_to_meas_info`.
+          Including the compensation channel data is recommended but not
+          mandatory. If the data saved in the Magnes system are already
+          compensated, there will be a small error in the forward calculations
+          whose significance has not been evaluated carefully at this time.
+
+
+Creating software gradient compensation data
+--------------------------------------------
+
+The utility mne_create_comp_data was
+written to create software gradient compensation weight data for
+4D Magnes fif files. This utility takes a text file containing the
+compensation data as input and writes the corresponding fif file
+as output. This file can be merged into the fif file containing
+4D Magnes data with the utility :ref:`mne_add_to_meas_info`.
+See :ref:`mne_create_comp_data` for command-line options.
+
+
+Importing CTF data
+==================
+
+The C command line tools include a utility :ref:`mne_ctf2fiff`,
+based on the BrainStorm Matlab code by Richard Leahy, John Mosher,
+and Sylvain Baillet, to convert data in CTF ds directory to fif
+format.
+
+
+Importing CTF Polhemus data
+===========================
+
+The CTF MEG systems store the Polhemus digitization data
+in text files. The utility :ref:`mne_ctf_dig2fiff` was
+created to convert these data files into the fif and hpts formats.
+
+
+.. _BEHDDFBI:
+
+Applying software gradient compensation
+---------------------------------------
+
+Since the software gradient compensation employed in CTF
+systems is a reversible operation, it is possible to change the
+compensation status of CTF data in the data files as desired. This
+section contains information about the technical details of the
+compensation procedure and a description of mne_compensate_data ,
+which is a utility to change the software gradient compensation
+state in evoked-response data files.
+
+The fif files containing CTF data converted using the utility mne_ctf2fiff contain
+several compensation matrices which are employed to suppress external disturbances
+with help of the reference channel data. The reference sensors are
+located further away from the brain than the helmet sensors and
+are thus measuring mainly the external disturbances rather than magnetic
+fields originating in the brain. Most often, a compensation matrix
+corresponding to a scheme nicknamed *Third-order gradient
+compensation* is employed.
+
+Let us assume that the data contain :math:`n_1` MEG
+sensor channels, :math:`n_2` reference sensor
+channels, and :math:`n_3` other channels.
+The data from all channels can be concatenated into a single vector
+
+.. math::    x = [x_1^T x_2^T x_3^T]^T\ ,
+
+where :math:`x_1`, :math:`x_2`,
+and :math:`x_3` are the data vectors corresponding
+to the MEG sensor channels, reference sensor channels, and other
+channels, respectively. The data before and after compensation,
+denoted here by :math:`x_{(0)}` and :math:`x_{(k)}`, respectively,
+are related by
+
+.. math::    x_{(k)} = M_{(k)} x_{(0)}\ ,
+
+where the composite compensation matrix is
+
+.. math::    M_{(k)} = \begin{bmatrix}
+		I_{n_1} & C_{(k)} & 0 \\
+		0 & I_{n_2} & 0 \\
+		0 & 0 & I_{n_3}
+		\end{bmatrix}\ .
+
+In the above, :math:`C_{(k)}` is an :math:`n_1 \times n_2` compensation
+data matrix corresponding to compensation "grade" :math:`k`.
+It is easy to see that
+
+.. math::    M_{(k)}^{-1} = \begin{bmatrix}
+		I_{n_1} & -C_{(k)} & 0 \\
+		0 & I_{n_2} & 0 \\
+		0 & 0 & I_{n_3}
+		\end{bmatrix}\ .
+
+To convert from compensation grade :math:`k` to :math:`p`, one
+can simply multiply the inverse of one compensation matrix
+by another and apply the product to the data:
+
+.. math::    x_{(k)} = M_{(k)} M_{(p)}^{-1} x_{(p)}\ .
+
+This operation is performed by :ref:`mne_compensate_data`.
+
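+The block structure of the compensation matrices is easy to verify
+numerically. A small illustration with arbitrary channel counts and a
+random stand-in for :math:`C_{(k)}`, purely for demonstration:
+
+.. code:: python
+
+    import numpy as np
+
+    n1, n2, n3 = 4, 2, 3  # MEG, reference, and other channel counts
+    C = np.random.randn(n1, n2)  # stand-in for C_(k)
+
+    # composite compensation matrix M_(k) and its analytic inverse
+    M = np.eye(n1 + n2 + n3)
+    M[:n1, n1:n1 + n2] = C
+    M_inv = np.eye(n1 + n2 + n3)
+    M_inv[:n1, n1:n1 + n2] = -C
+
+    # the analytic inverse above is indeed the matrix inverse
+    assert np.allclose(np.dot(M, M_inv), np.eye(n1 + n2 + n3))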
+
+Importing KIT MEG system data
+=============================
+
+MNE-Python includes :func:`mne.io.read_raw_kit` and
+:func:`mne.read_epochs_kit` to read and convert KIT MEG data.
+These reader functions will by default replace the original channel names,
+whose indices typically start at zero, with names whose indices start at one.
+
+To import continuous data, only the input .sqd or .con file is needed. For epochs,
+an N x 3 event matrix, whose third column contains the trigger value
+corresponding to each event, is also needed.
+
+The following input files are optional:
+
+- A KIT marker file (mrk file) or an array-like
+  containing the locations of the HPI coils in the MEG device coordinate system.
+  These data are used together with the elp file to establish the coordinate
+  transformation between the head and device coordinate systems.
+
+- A Polhemus points file (elp file) or an array-like
+  containing the locations of the fiducials and the head-position
+  indicator (HPI) coils. These data are usually given in the Polhemus
+  head coordinate system.
+
+- A Polhemus head shape data file (hsp file) or an array-like
+  containing locations of additional points from the head surface.
+  These points must be given in the same coordinate system as that
+  used for the elp file.
+
+
+.. note:: The output fif file will use the Neuromag head coordinate system convention, see :ref:`BJEBIBAI`. A coordinate transformation between the Polhemus head coordinates and the Neuromag head coordinates is included.
+
+
+By default, for KIT-157 systems the first 157 channels are assumed to be the MEG channels,
+the next 3 channels the reference compensation channels, and channels 160
+onwards are designated as miscellaneous input channels (MISC 001, MISC 002, etc.).
+For KIT-208 systems the first 208 channels are assumed to be the MEG channels,
+the next 16 channels the reference compensation channels, and channels 224
+onwards are designated as miscellaneous input channels (MISC 001, MISC 002, etc.).
+
+In addition, it is possible to synthesize the digital trigger channel (STI 014)
+from available analog trigger channel data by specifying the following parameters:
+
+- A list of trigger channels (stim) or default triggers with order: '<' | '>'
+  Channel-value correspondence when converting KIT trigger channels to a
+  Neuromag-style stim channel. By default, we assume the first eight miscellaneous
+  channels are trigger channels. For '<', the largest values are assigned
+  to the first channel (little endian; default). For '>', the largest values are
+  assigned to the last channel (big endian). Can also be specified as a list of
+  trigger channel indexes.
+- The trigger channel slope (slope) : '+' | '-'
+  How to interpret values on KIT trigger channels when synthesizing a
+  Neuromag-style stim channel. With '+', a positive slope (low-to-high)
+  is interpreted as an event. With '-', a negative slope (high-to-low)
+  is interpreted as an event.
+- A stimulus threshold (stimthresh) : float
+  The threshold level for accepting voltage changes in KIT trigger
+  channels as a trigger event.
+
+The synthesized trigger channel data value at sample :math:`k` will
+be:
+
+.. math::    s(k) = \sum_{p = 1}^n {t_p(k) 2^{p - 1}}\ ,
+
+where :math:`t_p(k)` are the thresholded values derived
+from the input channel data :math:`d_p(k)`:
+
+.. math::    t_p(k) = \Bigg\{ \begin{array}{l}
+		 0 \text{  if  } d_p(k) \leq t\\
+		 1 \text{  if  } d_p(k) > t
+	     \end{array}\ .
+
+The threshold value :math:`t` can
+be adjusted with the ``stimthresh`` parameter, as in the sketch below.
+
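+A minimal sketch of reading a KIT file while synthesizing the trigger
+channel; the file names are hypothetical:
+
+.. code:: python
+
+    import mne
+
+    raw = mne.io.read_raw_kit(
+        'data.sqd', mrk='marker.mrk', elp='points.elp', hsp='shape.hsp',
+        stim='<',       # largest trigger values go to the first channel
+        slope='-',      # a high-to-low transition marks an event
+        stimthresh=1.)  # threshold for accepting a voltage change
+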
+
+Importing EEG data
+##################
+
+The MNE package includes various functions and utilities for reading EEG
+data and electrode templates.
+
+Brainvision (.vhdr)
+===================
+
+Brainvision EEG files can be read in using :func:`mne.io.read_raw_brainvision`.
+
+European data format (.edf)
+===========================
+
+EDF and EDF+ files can be read in using :func:`mne.io.read_raw_edf`.
+
+EDF (European Data Format) and EDF+ are 16-bit formats; their
+specifications are available at http://www.edfplus.info/specs/edf.html
+and http://www.edfplus.info/specs/edfplus.html, respectively.
+
+The EDF+ files may contain an annotation channel which can
+be used to store trigger information. The Time-stamped Annotation
+Lists (TALs) on the annotation data can be converted to a trigger
+channel (STI 014) using an annotation map file which associates
+an annotation label with a number on the trigger channel.
+
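+A minimal sketch; the file name is hypothetical:
+
+.. code:: python
+
+    import mne
+
+    raw = mne.io.read_raw_edf('recording.edf', preload=True)
+    print(raw.ch_names)  # channel labels read from the EDF header
+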
+Biosemi data format (.bdf)
+==========================
+
+The BDF format (http://www.biosemi.com/faq/file_format.htm) is a 24-bit variant
+of the EDF format used by EEG systems manufactured by BioSemi. It can also
+be read in using :func:`mne.io.read_raw_edf`.
+
+.. warning:: The data samples in a BDF file are represented in a 3-byte (24-bit) format. Since 3-byte raw data buffers are not presently supported in the fif format these data will be changed to 4-byte integers in the conversion.
+
+EGI simple binary (.egi)
+========================
+
+EGI simple binary files can be read in using :func:`mne.io.read_raw_egi`.
+The EGI raw files are simple binary files with a header and can be exported
+from the EGI Netstation acquisition software.
+
+
+Importing EEG data saved in the Tufts University format
+=======================================================
+
+The command line utility :ref:`mne_tufts2fiff` was
+created in collaboration with Phillip Holcomb and Annette Schmid
+from Tufts University to import their EEG data to the MNE software.
+
+The Tufts EEG data is included in three files:
+
+- The raw data file containing the acquired
+  EEG data. The name of this file ends with the suffix ``.raw`` .
+
+- The calibration raw data file. This file contains known calibration
+  signals and is required to bring the data to physical units. The
+  name of this file ends with the suffix ``c.raw`` .
+
+- The electrode location information file. The name of this
+  file ends with the suffix ``.elp`` .
+
+See the options for the command-line utility :ref:`mne_tufts2fiff`.
+
+Converting eXimia EEG data
+==========================
+
+EEG data from the Nexstim eXimia system can be converted
+to the fif format with the help of the :ref:`mne_eximia2fiff` script.
+It creates a BrainVision ``vhdr`` file and calls :ref:`mne_brain_vision2fiff`.
+
+
+
+Reading Electrode locations and Headshapes for EEG recordings
+#############################################################
+
+Some EEG formats (EGI, EDF/EDF+, BDF) contain neither electrode location
+information nor head shape digitization information. Therefore, this information
+has to be provided separately. For that purpose all readers have a montage
+parameter to read locations from standard electrode templates or a Polhemus
+digitizer file. This can also be done post hoc using the
+:meth:`mne.io.Raw.set_montage` method of the Raw object in memory.
+
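+A minimal sketch, assuming the data carry standard 10-20 channel names
+(the file name is hypothetical):
+
+.. code:: python
+
+    import mne
+
+    raw = mne.io.read_raw_edf('recording.edf', preload=True)
+    montage = mne.channels.read_montage('standard_1020')
+    raw.set_montage(montage)
+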
+
+When the locations of the fiducial points are used, the digitization data
+are converted to the MEG head coordinate system employed in the
+MNE software, see :ref:`BJEBIBAI`.
+
+
+Creating MNE data structures from arbitrary data (from memory)
+##############################################################
+
+Arbitrary (e.g., simulated or manually read in) raw data can be constructed
+from memory by making use of :class:`mne.io.RawArray`, :class:`mne.EpochsArray`
+or :class:`mne.EvokedArray` in combination with :func:`mne.create_info`.
+
+This functionality is illustrated in :ref:`example_io_plot_objects_from_arrays.py`.
+Using third-party libraries such as NEO (https://pythonhosted.org/neo/) in combination
+with these functions, a wide range of electrophysiological file formats can
+be easily loaded into MNE.
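+
+A minimal sketch of building a :class:`mne.io.RawArray` from a NumPy
+array; the channel names and sampling rate are arbitrary:
+
+.. code:: python
+
+    import numpy as np
+    import mne
+
+    sfreq = 1000.  # sampling rate in Hz
+    data = np.random.randn(2, 10 * int(sfreq))  # 2 channels, 10 s of data
+    info = mne.create_info(ch_names=['EEG 001', 'EEG 002'],
+                           sfreq=sfreq, ch_types=['eeg', 'eeg'])
+    raw = mne.io.RawArray(data, info)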
diff --git a/doc/source/manual/matlab.rst b/doc/manual/matlab.rst
similarity index 99%
rename from doc/source/manual/matlab.rst
rename to doc/manual/matlab.rst
index c137345..0f3d37d 100644
--- a/doc/source/manual/matlab.rst
+++ b/doc/manual/matlab.rst
@@ -2,9 +2,13 @@
 
 .. _ch_matlab:
 
-==================
-The Matlab toolbox
-==================
+==============
+Matlab toolbox
+==============
+
+.. contents:: Contents
+   :local:
+   :depth: 2
 
 Overview
 ########
@@ -414,7 +418,8 @@ they are listed in :ref:`BGBEFADJ`.
     +--------------------------------+--------------------------------------------------------------+
     | mne_transform_coordinates      | Transform locations between different coordinate systems.    |
     |                                | This function uses the output file from                      |
-    |                                | mne_collect_transforms described in :ref:`BABBIFIJ` as input.|
+    |                                | mne_collect_transforms described in                          |
+    |                                | :ref:`mne_collect_transforms` as input.                      |
     +--------------------------------+--------------------------------------------------------------+
     | mne_transpose_named_matrix     | Create a transpose of a named matrix.                        |
     +--------------------------------+--------------------------------------------------------------+
@@ -430,7 +435,7 @@ they are listed in :ref:`BGBEFADJ`.
     | Function                       | Purpose                                                      |
     +================================+==============================================================+
     | mne_ex_average_epochs          | Example of averaging epoch data produced by mne_epochs2mat,  |
-    |                                | see :ref:`BEHFIDCB`.                                         |
+    |                                | see :ref:`mne_epochs2mat`.                                   |
     +--------------------------------+--------------------------------------------------------------+
     | mne_ex_cancel_noise            | Example of noise cancellation procedures.                    |
     +--------------------------------+--------------------------------------------------------------+
@@ -452,7 +457,7 @@ they are listed in :ref:`BGBEFADJ`.
     +--------------------------------+--------------------------------------------------------------+
 
 
-.. note:: In order for the inverse operator calculation to work correctly with data processed with the Elekta-Neuromag Maxfilter (TM) software, the so-called *processing history* block must be included in data files. Previous versions of the MNE Matlab functions did not copy processing history to files saved. As of March 30, 2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have been enhanced to include these data to the output file as appropriate. If you hav [...]
+.. note:: In order for the inverse operator calculation to work correctly with data processed with the Elekta-Neuromag Maxfilter (TM) software, the so-called *processing history* block must be included in data files. Previous versions of the MNE Matlab functions did not copy processing history to files saved. As of March 30, 2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have been enhanced to include these data to the output file as appropriate. If you hav [...]
 
 Some data structures
 ####################
@@ -1122,7 +1127,8 @@ The documented structures are:
     | sol_grad                | named       | The derivatives of the forward solution with respect to  |
     |                         | matrix      | the dipole location coordinates, see :ref:`BJEFEJJG`.    |
     |                         |             | This field is present only if the forward solution was   |
-    |                         |             | computed with the ``--grad`` option, see :ref:`BJEIGFAE`.|
+    |                         |             | computed with the ``--grad`` option, see                 |
+    |                         |             | :ref:`mne_forward_solution`.                             |
     +-------------------------+-------------+----------------------------------------------------------+
     | mri_head_t              | trans       | Transformation from the MRI coordinate frame to the      |
     |                         |             | (Neuromag) head coordinate frame.                        |
diff --git a/doc/source/manual/pics/CoordinateSystems.png b/doc/manual/pics/CoordinateSystems.png
similarity index 100%
rename from doc/source/manual/pics/CoordinateSystems.png
rename to doc/manual/pics/CoordinateSystems.png
diff --git a/doc/source/manual/pics/Digitizer-example.png b/doc/manual/pics/Digitizer-example.png
similarity index 100%
rename from doc/source/manual/pics/Digitizer-example.png
rename to doc/manual/pics/Digitizer-example.png
diff --git a/doc/source/manual/pics/Flowchart.png b/doc/manual/pics/Flowchart.png
similarity index 100%
rename from doc/source/manual/pics/Flowchart.png
rename to doc/manual/pics/Flowchart.png
diff --git a/doc/source/manual/pics/HeadCS.png b/doc/manual/pics/HeadCS.png
similarity index 100%
rename from doc/source/manual/pics/HeadCS.png
rename to doc/manual/pics/HeadCS.png
diff --git a/doc/manual/pics/ICA_primer.png b/doc/manual/pics/ICA_primer.png
new file mode 100644
index 0000000..91456c9
Binary files /dev/null and b/doc/manual/pics/ICA_primer.png differ
diff --git a/doc/source/manual/pics/cover.png b/doc/manual/pics/cover.png
similarity index 100%
rename from doc/source/manual/pics/cover.png
rename to doc/manual/pics/cover.png
diff --git a/doc/source/manual/pics/flat.png b/doc/manual/pics/flat.png
similarity index 100%
rename from doc/source/manual/pics/flat.png
rename to doc/manual/pics/flat.png
diff --git a/doc/source/manual/pics/morphed.png b/doc/manual/pics/morphed.png
similarity index 100%
rename from doc/source/manual/pics/morphed.png
rename to doc/manual/pics/morphed.png
diff --git a/doc/source/manual/pics/neuromag.png b/doc/manual/pics/neuromag.png
similarity index 100%
rename from doc/source/manual/pics/neuromag.png
rename to doc/manual/pics/neuromag.png
diff --git a/doc/source/manual/pics/orig.png b/doc/manual/pics/orig.png
similarity index 100%
rename from doc/source/manual/pics/orig.png
rename to doc/manual/pics/orig.png
diff --git a/doc/source/manual/pics/proj-off-on.png b/doc/manual/pics/proj-off-on.png
similarity index 100%
rename from doc/source/manual/pics/proj-off-on.png
rename to doc/manual/pics/proj-off-on.png
diff --git a/doc/source/manual/pics/title_page.png b/doc/manual/pics/title_page.png
similarity index 100%
rename from doc/source/manual/pics/title_page.png
rename to doc/manual/pics/title_page.png
diff --git a/doc/manual/preprocessing/bads.rst b/doc/manual/preprocessing/bads.rst
new file mode 100644
index 0000000..f1fd803
--- /dev/null
+++ b/doc/manual/preprocessing/bads.rst
@@ -0,0 +1,3 @@
+============
+Bad channels
+============
diff --git a/doc/manual/preprocessing/filter.rst b/doc/manual/preprocessing/filter.rst
new file mode 100644
index 0000000..c51195d
--- /dev/null
+++ b/doc/manual/preprocessing/filter.rst
@@ -0,0 +1,3 @@
+=========
+Filtering
+=========
diff --git a/doc/manual/preprocessing/ica.rst b/doc/manual/preprocessing/ica.rst
new file mode 100644
index 0000000..8040939
--- /dev/null
+++ b/doc/manual/preprocessing/ica.rst
@@ -0,0 +1,118 @@
+.. _ica:
+
+Independent Component Analysis (ICA)
+####################################
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+Many M/EEG signals, including biological artifacts, reflect non-Gaussian
+processes. Therefore PCA-based artifact rejection is likely to perform poorly
+at separating the signal from noise sources.
+MNE-Python supports identifying artifacts and latent components using temporal ICA.
+It implements the :class:`mne.preprocessing.ICA` class that facilitates applying ICA
+to MEG and EEG data. It supports FastICA, Infomax, and the extended Infomax algorithm.
+It allows whitening the data using a fast randomized PCA algorithm. Furthermore,
+multiple sensor types are supported by pre-whitening / rescaling. Bad data segments can be
+excluded from the model fitting via the `reject` parameter of :meth:`mne.preprocessing.ICA.fit`.
+
+For convenience, :class:`mne.preprocessing.ICA` implements methods for
+    - automated detection of ECG and EOG artifacts
+        - :meth:`mne.preprocessing.ICA.find_bads_ecg`
+        - :meth:`mne.preprocessing.ICA.find_bads_eog`
+    - visualization
+        - :meth:`mne.preprocessing.ICA.plot_components` for mapping the spatial sensitivity of a component
+        - :meth:`mne.preprocessing.ICA.plot_sources` for component-related time series
+        - :meth:`mne.preprocessing.ICA.plot_scores` for the scores on which component detection is based
+        - :meth:`mne.preprocessing.ICA.plot_overlay` for showing differences between raw and processed data
+    - persistence
+        :meth:`mne.preprocessing.ICA.save` for writing the ICA solution into a fif file.
+    - integration with the MNE-Python object system
+        :meth:`mne.preprocessing.ICA.get_sources` for putting component-related time series into MNE data structures.
+
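+A minimal sketch of a typical artifact-rejection workflow; ``raw`` is an
+already loaded :class:`mne.io.Raw` instance and the rejection thresholds
+are arbitrary examples:
+
+.. code:: python
+
+    from mne.preprocessing import ICA
+
+    ica = ICA(n_components=0.95, method='fastica')
+    ica.fit(raw, reject=dict(mag=4e-12, grad=4000e-13))
+
+    # mark components correlating with the ECG channel, then remove them
+    ecg_inds, scores = ica.find_bads_ecg(raw)
+    ica.exclude.extend(ecg_inds)
+    raw_clean = ica.apply(raw.copy())
+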
+Concepts
+========
+
+ICA finds directions in the feature space corresponding to projections with high non-Gaussianity.
+
+- These directions are not necessarily orthogonal in the original feature space, but they are orthogonal in the whitened feature space.
+- In contrast, PCA finds orthogonal directions in the raw feature
+  space that correspond to directions accounting for maximum variance.
+- Put differently, if the data reflect only Gaussian processes, ICA and PCA are equivalent.
+
+
+**Example**: Imagine 3 instruments playing simultaneously and 3 microphones
+recording the mixed signals. ICA can be used to recover the sources, i.e., what is played by each instrument.
+
+ICA employs a very simple model: :math:`X = AS`, where :math:`X` are our observations, :math:`A` is the mixing matrix, and :math:`S` is the vector of independent (latent) sources.
+
+The challenge is to recover :math:`A` and :math:`S` from :math:`X`.
+
+
+First generate simulated data
+-----------------------------
+
+.. code:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+    from scipy import signal
+
+    from sklearn.decomposition import FastICA, PCA
+
+    np.random.seed(0)  # set seed for reproducible results
+    n_samples = 2000
+    time = np.linspace(0, 8, n_samples)
+
+    s1 = np.sin(2 * time)  # Signal 1 : sinusoidal signal
+    s2 = np.sign(np.sin(3 * time))  # Signal 2 : square signal
+    s3 = signal.sawtooth(2 * np.pi * time)  # Signal 3: sawtooth signal
+
+    S = np.c_[s1, s2, s3]
+    S += 0.2 * np.random.normal(size=S.shape)  # Add noise
+
+    S /= S.std(axis=0)  # Standardize data
+    # Mix data
+    A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]])  # Mixing matrix
+    X = np.dot(S, A.T)  # Generate observations
+
+Now try to recover the sources
+------------------------------
+
+.. code:: python
+
+    # compute ICA
+    ica = FastICA(n_components=3)
+    S_ = ica.fit_transform(X)  # Get the estimated sources
+    A_ = ica.mixing_  # Get estimated mixing matrix
+
+    # compute PCA
+    pca = PCA(n_components=3)
+    H = pca.fit_transform(X)  # estimate PCA sources
+
+    plt.figure(figsize=(9, 6))
+
+    models = [X, S, S_, H]
+    names = ['Observations (mixed signal)',
+             'True Sources',
+             'ICA estimated sources',
+             'PCA estimated sources']
+    colors = ['red', 'steelblue', 'orange']
+
+    for ii, (model, name) in enumerate(zip(models, names), 1):
+        plt.subplot(4, 1, ii)
+        plt.title(name)
+        for sig, color in zip(model.T, colors):
+            plt.plot(sig, color=color)
+
+    plt.tight_layout()
+    plt.show()
+
+
+
+.. image:: ../pics/ICA_primer.png
+
+
+:math:`\rightarrow` PCA fails to recover our "instruments" since the
+underlying signals reflect non-Gaussian processes.
diff --git a/doc/manual/preprocessing/overview.rst b/doc/manual/preprocessing/overview.rst
new file mode 100644
index 0000000..99ea2d6
--- /dev/null
+++ b/doc/manual/preprocessing/overview.rst
@@ -0,0 +1,3 @@
+========
+Overview
+========
diff --git a/doc/manual/preprocessing/ssp.rst b/doc/manual/preprocessing/ssp.rst
new file mode 100644
index 0000000..06dace9
--- /dev/null
+++ b/doc/manual/preprocessing/ssp.rst
@@ -0,0 +1,128 @@
+.. _ssp:
+
+The Signal-Space Projection (SSP) method
+########################################
+
+Signal-Space Projection (SSP) is one approach to rejecting
+external disturbances in software. This section presents some
+relevant details of the method.
+
+In MNE-Python, SSP projection vectors can be computed using the general
+purpose functions :func:`mne.compute_proj_epochs`,
+:func:`mne.compute_proj_evoked`, and :func:`mne.compute_proj_raw`.
+The general assumption these functions make is that the data passed contain
+raw data, epochs, or averages of the artifact. Typically this involves continuous
+raw data of empty-room recordings or averaged ECG or EOG artifacts.
+
+A second set of high-level convenience functions is provided to compute
+projection vectors for typical use cases. This includes
+:func:`mne.preprocessing.compute_proj_ecg` and
+:func:`mne.preprocessing.compute_proj_eog` for computing the ECG and EOG
+related artifact components, respectively.
+
+The underlying implementation can be found in :mod:`mne.preprocessing.ssp`.
+
+The following examples demonstrate how to use the SSP code:
+In :ref:`example_visualization_plot_evoked_delayed_ssp.py` and :ref:`example_visualization_plot_evoked_topomap_delayed_ssp.py`
+SSPs are illustrated by toggling them in real time.
+In :ref:`example_visualization_plot_ssp_projs_topomaps.py` and :ref:`example_visualization_plot_ssp_projs_sensitivity_map.py`
+the SSP sensitivities are visualized in sensor and source space, respectively.
+
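+A minimal sketch of computing SSP vectors from an empty-room recording
+and attaching them to the subject's data; the variable names are
+hypothetical:
+
+.. code:: python
+
+    import mne
+
+    # empty_room_raw and raw are preloaded mne.io.Raw instances
+    projs = mne.compute_proj_raw(empty_room_raw, n_grad=1, n_mag=1, n_eeg=0)
+    raw.add_proj(projs)  # attach the projection vectors
+    raw.apply_proj()     # apply the projection operator to the data
+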
+Background
+==========
+
+Concepts
+--------
+
+Unlike many other noise-cancellation approaches, SSP does
+not require additional reference sensors to record the disturbance
+fields. Instead, SSP relies on the fact that the magnetic field
+distributions generated by the sources in the brain have spatial
+distributions sufficiently different from those generated by external
+noise sources. Furthermore, it is implicitly assumed that the linear
+space spanned by the significant external noise patterns has a low
+dimension.
+
+Without loss of generality we can always decompose any :math:`n`-channel
+measurement :math:`b(t)` into its signal and
+noise components as
+
+.. math::    b(t) = b_s(t) + b_n(t)
+
+Further, if we know that :math:`b_n(t)` is
+well characterized by a few field patterns :math:`b_1 \dotso b_m`,
+we can express the disturbance as
+
+.. math::    b_n(t) = Uc_n(t) + e(t)\ ,
+
+where the columns of :math:`U` constitute
+an orthonormal basis for :math:`b_1 \dotso b_m`, :math:`c_n(t)` is
+an :math:`m`-component column vector, and
+the error term :math:`e(t)` is small and does
+not exhibit any consistent spatial distributions over time, *i.e.*, :math:`C_e = E \{e e^T\} = I`.
+Subsequently, we will call the column space of :math:`U` the
+noise subspace. The basic idea of SSP is that we can actually find
+a small basis set :math:`b_1 \dotso b_m` such that the
+conditions described above are satisfied. We can now construct the
+orthogonal complement operator
+
+.. math::    P_{\perp} = I - UU^T
+
+and apply it to :math:`b(t)` yielding
+
+.. math::    b(t) = P_{\perp}b_s(t)\ ,
+
+since :math:`P_{\perp}b_n(t) = P_{\perp}Uc_n(t) \approx 0`. The projection operator :math:`P_{\perp}` is
+called the signal-space projection operator and generally provides
+considerable rejection of noise, suppressing external disturbances
+by a factor of 10 or more. The effectiveness of SSP depends on two
+factors:
+
+- The basis set :math:`b_1 \dotso b_m` should
+  be able to characterize the disturbance field patterns completely
+  and
+
+- The angles between the noise subspace spanned by :math:`b_1 \dotso b_m` and the
+  signal vectors :math:`b_s(t)` should be as close
+  to :math:`\pi / 2` as possible.
+
+If the first requirement is not satisfied, some noise will
+leak through because :math:`P_{\perp}b_n(t) \neq 0`. If any
+of the brain signal vectors :math:`b_s(t)` is
+close to the noise subspace, not only the noise but also the signal
+will be attenuated by the application of :math:`P_{\perp}` and,
+consequently, there might be little gain in signal-to-noise ratio.
+:ref:`CACFGIEC` demonstrates the effect of SSP on the Vectorview
+magnetometer data. After the elimination of a three-dimensional
+noise subspace, the absolute value of the noise is dampened approximately
+by a factor of 10 and the covariance matrix becomes diagonally dominant.
+
+Since the signal-space projection modifies the signal vectors
+originating in the brain, it is necessary to apply the projection
+to the forward solution in the course of inverse computations. This
+is accomplished by mne_inverse_operator as
+described in :ref:`CBBDDBGF`. For more information on SSP,
+please consult the references listed in :ref:`CEGIEEBB`.
+
+.. _CACFGIEC:
+
+.. figure:: ../pics/proj-off-on.png
+    :alt: example of the effect of SSP
+
+    An example of the effect of SSP
+
+    The covariance matrix :math:`C_n` of noise data on the 102 Vectorview magnetometers was computed (a) before and (b) after the application of SSP with three-dimensional noise subspace. The plotted quantity is :math:`\sqrt {|(C_n)_{jk}|}`. Note that the vertical scale in (b) is ten times smaller than in (a).
+
+.. _BABFFCHF:
+
+Estimation of the noise subspace
+--------------------------------
+
+As described above, application of SSP requires the estimation
+of the signal vectors :math:`b_1 \dotso b_m` constituting
+the noise subspace. The most common approach, also implemented in mne_browse_raw, is
+to compute a covariance matrix of empty room data, compute its eigenvalue
+decomposition, and employ the eigenvectors corresponding to the
+highest eigenvalues as basis for the noise subspace. It is also
+customary to use a separate set of vectors for magnetometers and
+gradiometers in the Vectorview system.
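+
+A minimal NumPy sketch of this estimation and of the construction of
+the projection operator (the dimensions and the random stand-in for the
+empty room data are illustrative only; this is not the mne_browse_raw
+implementation):
+
+.. code-block:: python
+
+    import numpy as np
+
+    n_channels, n_samples, m = 102, 10000, 3   # e.g., Vectorview magnetometers
+    rng = np.random.RandomState(0)
+    empty_room = rng.randn(n_channels, n_samples)   # stand-in for real data
+
+    # Noise covariance of the empty room data and its eigendecomposition
+    C = np.cov(empty_room)
+    eigval, eigvec = np.linalg.eigh(C)         # eigenvalues in ascending order
+
+    # Eigenvectors of the m largest eigenvalues span the noise subspace
+    U = eigvec[:, -m:]
+
+    # Orthogonal complement (SSP) operator P = I - U U^T
+    P = np.eye(n_channels) - np.dot(U, U.T)
+
+    # Applying P to a measurement suppresses the noise subspace components
+    b = rng.randn(n_channels)
+    b_clean = np.dot(P, b)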
diff --git a/doc/manual/source_localization/covariance.rst b/doc/manual/source_localization/covariance.rst
new file mode 100644
index 0000000..553416d
--- /dev/null
+++ b/doc/manual/source_localization/covariance.rst
@@ -0,0 +1,5 @@
+.. _covariance:
+
+==========
+Covariance
+==========
diff --git a/doc/source/manual/forward.rst b/doc/manual/source_localization/forward.rst
similarity index 69%
rename from doc/source/manual/forward.rst
rename to doc/manual/source_localization/forward.rst
index eef1946..de122b8 100644
--- a/doc/source/manual/forward.rst
+++ b/doc/manual/source_localization/forward.rst
@@ -6,6 +6,11 @@
 The forward solution
 ====================
 
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+
 Overview
 ########
 
@@ -29,7 +34,7 @@ to the front, and the :math:`z` axis up.
 
 .. _CHDFFJIJ:
 
-.. figure:: pics/CoordinateSystems.png
+.. figure:: ../pics/CoordinateSystems.png
     :alt: MEG/EEG and MRI coordinate systems
 
     MEG/EEG and MRI coordinate systems
@@ -215,7 +220,7 @@ and
 The head and device coordinate systems
 ######################################
 
-.. figure:: pics/HeadCS.png
+.. figure:: ../pics/HeadCS.png
     :alt: Head coordinate system
 
     The head coordinate system
@@ -242,7 +247,7 @@ and :math:`y` axis pointing front. The :math:`z` axis
 is, again, normal to the :math:`xy` plane
 with positive direction up.
 
-.. note:: The above definition is identical to that    of the Neuromag MEG/EEG (head) coordinate system. However, in 4-D    Neuroimaging and CTF MEG systems the head coordinate frame definition    is different. The origin of the coordinate system is at the midpoint    of the left and right auricular points. The :math:`x` axis    passes through the nasion and the origin with positive direction    to the front. The :math:`y` axis is perpendicular    to the :math:`x` axis on the and lies in [...]
+.. note:: The above definition is identical to that    of the Neuromag MEG/EEG (head) coordinate system. However, in 4-D    Neuroimaging and CTF MEG systems the head coordinate frame definition    is different. The origin of the coordinate system is at the midpoint    of the left and right auricular points. The :math:`x` axis    passes through the nasion and the origin with positive direction    to the front. The :math:`y` axis is perpendicular    to the :math:`x` axis on the and lies in [...]
 
 .. _BEHCGJDD:
 
@@ -250,83 +255,10 @@ Creating a surface-based source space
 #####################################
 
 The fif format source space files containing the dipole locations
-and orientations are created with the utility mne_make_source_space .
-This utility is usually invoked by the convenience script mne_setup_source_space ,
+and orientations are created with the utility :ref:`mne_make_source_space`.
+This utility is usually invoked by the convenience script :ref:`mne_setup_source_space`,
 see :ref:`CIHCHDAE`.
 
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---subject <*name*>**
-
-    Name of the subject in SUBJECTS_DIR. In the absence of this option,
-    the SUBJECT environment variable will be consulted. If it is not
-    defined, mne_setup_source_space exits
-    with an error.
-
-**\---morph <*name*>**
-
-    Name of a subject in SUBJECTS_DIR. If this option is present, the source
-    space will be first constructed for the subject defined by the --subject
-    option or the SUBJECT environment variable and then morphed to this
-    subject. This option is useful if you want to create a source spaces
-    for several subjects and want to directly compare the data across
-    subjects at the source space vertices without any morphing procedure
-    afterwards. The drawback of this approach is that the spacing between
-    source locations in the "morph" subject is not going
-    to be as uniform as it would be without morphing.
-
-**\---surf <*name1*>: <*name2*>:...**
-
-    FreeSurfer surface file names specifying the source surfaces, separated
-    by colons.
-
-**\---spacing <*spacing/mm*>**
-
-    Specifies the approximate grid spacing of the source space in mm.
-
-**\---ico <*number*>**
-
-    Instead of using the traditional method for cortical surface decimation
-    it is possible to create the source space using the topology of
-    a recursively subdivided icosahedron ( <*number*> > 0)
-    or an octahedron ( <*number*>  < 0).
-    This method uses the cortical surface inflated to a sphere as a
-    tool to find the appropriate vertices for the source space. The
-    benefit of the ``--ico`` option is that the source space will have triangulation
-    information between the decimated vertices included, which some
-    future versions of MNE software may be able to utilize. The number
-    of triangles increases by a factor of four in each subdivision,
-    starting from 20 triangles in an icosahedron and 8 triangles in
-    an octahedron. Since the number of vertices on a closed surface
-    is :math:`n_{vert} = (n_{tri} + 4) / 2`, the number of vertices in
-    the *k* th subdivision of an icosahedron and an
-    octahedron are :math:`10 \cdot 4^k +2` and :math:`4_{k + 1} + 2`,
-    respectively. The recommended values for <*number*> and
-    the corresponding number of source space locations are listed in Table 3.1.
-
-**\---all**
-
-    Include all nodes to the output. The active dipole nodes are identified
-    in the fif file by a separate tag. If tri files were used as input
-    the output file will also contain information about the surface
-    triangulation. This option is always recommended to include complete
-    information.
-
-**\---src <*name*>**
-
-    Output file name. Use a name <*dir*>/<*name*>-src.fif
-
-.. note:: If both ``--ico`` and ``--spacing`` options    are present the later one on the command line takes precedence.
-
-.. note:: Due to the differences between the FreeSurfer    and MNE libraries, the number of source space points generated with    the ``--spacing`` option may be different between the current    version of MNE and versions 2.5 or earlier (using ``--spacing`` option    to mne_setup_source_space ) if    the FreeSurfer surfaces employ the (old) quadrangle format or if    there are topological defects on the surfaces. All new FreeSurfer    surfaces are specified as triangular tessellations a [...]
 
 .. _BJEFEHJI:
 
@@ -336,207 +268,27 @@ Creating a volumetric or discrete source space
 In addition to source spaces confined to a surface, the MNE
 software provides some support for three-dimensional source spaces
 bounded by a surface as well as source spaces comprised of discrete,
-arbitrarily located source points. The mne_volume_source_space utility
+arbitrarily located source points. The :ref:`mne_volume_source_space` utility
 assists in generating such source spaces.
 
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---surf <*name*>**
-
-    Specifies a FreeSurfer surface file containing the surface which
-    will be used as the boundary for the source space.
-
-**\---bem <*name*>**
-
-    Specifies a BEM file (ending in ``-bem.fif`` ). The inner
-    skull surface will be used as the boundary for the source space.
-
-**\---origin <*x/mm*> : <*y/mm*> : <*z/mm*>**
-
-    If neither of the two surface options described above is present,
-    the source space will be spherical with the origin at this location,
-    given in MRI (RAS) coordinates.
-
-**\---rad <*radius/mm*>**
-
-    Specifies the radius of a spherical source space. Default value
-    = 90 mm
-
-**\---grid <*spacing/mm*>**
-
-    Specifies the grid spacing in the source space.
-
-**\---mindist <*distance/mm*>**
-
-    Only points which are further than this distance from the bounding surface
-    are included. Default value = 5 mm.
-
-**\---exclude <*distance/mm*>**
-
-    Exclude points that are closer than this distance to the center
-    of mass of the bounding surface. By default, there will be no exclusion.
-
-**\---mri <*name*>**
-
-    Specifies a MRI volume (in mgz or mgh format).
-    If this argument is present the output source space file will contain
-    a (sparse) interpolation matrix which allows mne_volume_data2mri to
-    create an MRI overlay file, see :ref:`BEHDEJEC`.
-
-**\---pos <*name*>**
-
-    Specifies a name of a text file containing the source locations
-    and, optionally, orientations. Each line of the file should contain
-    3 or 6 values. If the number of values is 3, they indicate the source
-    location, in millimeters. The orientation of the sources will be
-    set to the z-direction. If the number of values is 6, the source
-    orientation will be parallel to the vector defined by the remaining
-    3 numbers on each line. With ``--pos`` , all of the options
-    defined above will be ignored. By default, the source position and
-    orientation data are assumed to be given in MRI coordinates.
-
-**\---head**
-
-    If this option is present, the source locations and orientations
-    in the file specified with the ``--pos`` option are assumed
-    to be given in the MEG head coordinates.
-
-**\---meters**
-
-    Indicates that the source locations in the file defined with the ``--pos`` option
-    are give in meters instead of millimeters.
-
-**\---src <*name*>**
-
-    Specifies the output file name. Use a name * <*dir*>/ <*name*>*-src.fif
-
-**\---all**
-
-    Include all vertices in the output file, not just those in use.
-    This option is implied when the ``--mri`` option is present.
-    Even with the ``--all`` option, only those vertices actually
-    selected will be marked to be "in use" in the
-    output source space file.
 
 .. _BEHCACCJ:
 
 Creating the BEM meshes
 #######################
 
-The mne_surf2bem utility
+The :ref:`mne_surf2bem` utility
 converts surface triangle meshes from ASCII and FreeSurfer binary
 file formats to the fif format. The resulting fiff file also contains
 conductivity information so that it can be employed in the BEM calculations.
+See command-line options in :ref:`mne_surf2bem`.
 
-.. note:: The utility mne_tri2fiff previously    used for this task has been replaced by mne_surf2bem .
-
-.. note:: The convenience script mne_setup_forward_model described in :ref:`CIHDBFEG` calls mne_surf2bem with    the appropriate options.
-
-.. note:: The vertices of all surfaces should be given    in the MRI coordinate system.
-
-Command-line options
-====================
-
-This program has the following
-command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---surf <*name*>**
-
-    Specifies a FreeSurfer binary format surface file. Before specifying the
-    next surface (``--surf`` or ``--tri`` options)
-    details of the surface specification can be given with the options
-    listed in :ref:`BEHCDICC`.
-
-**\---tri <*name*>**
-
-    Specifies a text format surface file. Before specifying the next
-    surface (``--surf`` or ``--tri`` options) details
-    of the surface specification can be given with the options listed
-    in :ref:`BEHCDICC`. The format of these files is described
-    in :ref:`BEHDEFCD`.
-
-**\---check**
-
-    Check that the surfaces are complete and that they do not intersect. This
-    is a recommended option. For more information, see :ref:`BEHCBDDE`.
-
-**\---checkmore**
-
-    In addition to the checks implied by the ``--check`` option,
-    check skull and skull thicknesses. For more information, see :ref:`BEHCBDDE`.
-
-**\---fif <*name*>**
-
-    The output fif file containing the BEM. These files normally reside in
-    the bem subdirectory under the subject's mri data. A name
-    ending with ``-bem.fif`` is recommended.
-
-.. _BEHCDICC:
-
-Surface options
-===============
-
-These options can be specified after each ``--surf`` or ``--tri`` option
-to define details for the corresponding surface.
+.. note:: The utility ``mne_tri2fiff`` previously used for this task has been replaced by :ref:`mne_surf2bem`.
 
-**\---swap**
+.. note:: The convenience script :ref:`mne_setup_forward_model` described in :ref:`CIHDBFEG` calls :ref:`mne_surf2bem` with the appropriate options.
 
-    Swap the ordering or the triangle vertices. The standard convention in
-    the MNE software is to have the vertices ordered so that the vector
-    cross product of the vectors from vertex 1 to 2 and 1 to 3 gives the
-    direction of the outward surface normal. Text format triangle files
-    produced by the some software packages have an opposite order. For
-    these files, the ``--swap`` . option is required. This option does
-    not have any effect on the interpretation of the FreeSurfer surface
-    files specified with the ``--surf`` option.
+.. note:: The vertices of all surfaces should be given in the MRI coordinate system.
 
-**\---sigma <*value*>**
-
-    The conductivity of the compartment inside this surface in S/m.
-
-**\---shift <*value/mm*>**
-
-    Shift the vertices of this surface by this amount, given in mm,
-    in the outward direction, *i.e.*, in the positive
-    vertex normal direction.
-
-**\---meters**
-
-    The vertex coordinates of this surface are given in meters instead
-    of millimeters. This option applies to text format files only. This
-    definition does not affect the units of the shift option.
-
-**\---id <*number*>**
-
-    Identification number to assign to this surface. (1 = inner skull, 3
-    = outer skull, 4 = scalp).
-
-**\---ico <*number*>**
-
-    Downsample the surface to the designated subdivision of an icosahedron.
-    This option is relevant (and required) only if the triangulation
-    is isomorphic with a recursively subdivided icosahedron. For example,
-    the surfaces produced by with mri_watershed are
-    isomorphic with the 5th subdivision of a an icosahedron thus containing 20480
-    triangles. However, this number of triangles is too large for present
-    computers. Therefore, the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
-    in 1280 triangles. The recommended choice is ``--ico 4`` .
 
 .. _BEHDEFCD:
 
@@ -621,40 +373,18 @@ topology checks are performed:
   may indicate that the vertex coordinates have been specified in
   meters instead of millimeters.
 
+
 .. _CHDJFHEB:
 
 Computing the BEM geometry data
 ###############################
 
-The utility mne_prepare_bem_model computes
+The utility :ref:`mne_prepare_bem_model` computes
 the geometry information for BEM. This utility is usually invoked
-by the convenience script mne_setup_forward_model ,
-see :ref:`CIHDBFEG`. The command-line options are:
+by the convenience script :ref:`mne_setup_forward_model`,
+see :ref:`CIHDBFEG`. The command-line options are listed under
+:ref:`mne_prepare_bem_model`.
 
-**\---bem <*name*>**
-
-    Specify the name of the file containing the triangulations of the BEM
-    surfaces and the conductivities of the compartments. The standard
-    ending for this file is ``-bem.fif`` and it is produced
-    either with the utility mne_surf2bem (:ref:`BEHCACCJ`) or the convenience script mne_setup_forward_model ,
-    see :ref:`CIHDBFEG`.
-
-**\---sol <*name*>**
-
-    Specify the name of the file containing the triangulation and conductivity
-    information together with the BEM geometry matrix computed by mne_prepare_bem_model .
-    The standard ending for this file is ``-bem-sol.fif`` .
-
-**\---method <*approximation method*>**
-
-    Select the BEM approach. If <*approximation method*> is ``constant`` ,
-    the BEM basis functions are constant functions on each triangle
-    and the collocation points are the midpoints of the triangles. With ``linear`` ,
-    the BEM basis functions are linear functions on each triangle and
-    the collocation points are the vertices of the triangulation. This
-    is the preferred method to use. The accuracy will be the same or
-    better than in the constant collocation approach with about half
-    the number of unknowns in the BEM equations.
 
 .. _BJEIAEIE:
 
@@ -1025,166 +755,14 @@ Computing the forward solution
 Purpose
 =======
 
-Instead of using the convenience script mne_do_forward_solution it
-is also possible to invoke the forward solution computation program mne_forward_solution directly.
+Instead of using the convenience script :ref:`mne_do_forward_solution`, it
+is also possible to invoke the forward solution computation program :ref:`mne_forward_solution` directly.
 In this approach, the convenience of the automatic file naming conventions
-present in mne_do_forward_solution are
+present in :ref:`mne_do_forward_solution` is
 lost. However, there are some special-purpose options available
-in mne_forward_solution only.
-Please refer to :ref:`BABCHEJD` for information on mne_do_forward_solution.
-
-.. _BJEIGFAE:
-
-Command line options
-====================
-
-mne_forward_solution accepts
-the following command-line options:
-
-**\---src <*name*>**
-
-    Source space name to use. The name of the file must be specified exactly,
-    including the directory. Typically, the source space files reside
-    in $SUBJECTS_DIR/$SUBJECT/bem.
-
-**\---bem <*name*>**
-
-    Specifies the BEM to be used. These files end with bem.fif or bem-sol.fif and
-    reside in $SUBJECTS_DIR/$SUBJECT/bem. The former file contains only
-    the BEM surface information while the latter files contain the geometry
-    information precomputed with mne_prepare_bem_model ,
-    see :ref:`CHDJFHEB`. If precomputed geometry is not available,
-    the linear collocation solution will be computed by mne_forward_solution .
-
-**\---origin <*x/mm*> : <*x/mm*> : <*z/mm*>**
-
-    Indicates that the sphere model should be used in the forward calculations.
-    The origin is specified in MEG head coordinates unless the ``--mricoord`` option
-    is present. The MEG sphere model solution computed using the analytical
-    Sarvas formula. For EEG, an approximative solution described in
-
-**\---eegmodels <*name*>**
-
-    This option is significant only if the sphere model is used and
-    EEG channels are present. The specified file contains specifications
-    of the EEG sphere model layer structures as detailed in :ref:`CHDIAFIG`. If this option is absent the file ``$HOME/.mne/EEG_models`` will
-    be consulted if it exists.
-
-**\---eegmodel <*model name*>**
-
-    Specifies the name of the sphere model to be used for EEG. If this option
-    is missing, the model Default will
-    be employed, see :ref:`CHDIAFIG`.
-
-**\---eegrad <*radius/mm*>**
-
-    Specifies the radius of the outermost surface (scalp) of the EEG sphere
-    model, see :ref:`CHDIAFIG`. The default value is 90 mm.
-
-**\---eegscalp**
-
-    Scale the EEG electrode locations to the surface of the outermost sphere
-    when using the sphere model.
-
-**\---accurate**
-
-    Use accurate MEG sensor coil descriptions. This is the recommended
-    choice. More information
-
-**\---fixed**
-
-    Compute the solution for sources normal to the cortical mantle only. This
-    option should be used only for surface-based and discrete source
-    spaces.
-
-**\---all**
-
-    Compute the forward solution for all vertices on the source space.
-
-**\---label <*name*>**
-
-    Compute the solution only for points within the specified label. Multiple
-    labels can be present. The label files should end with ``-lh.label`` or ``-rh.label`` for
-    left and right hemisphere label files, respectively. If ``--all`` flag
-    is present, all surface points falling within the labels are included.
-    Otherwise, only decimated points with in the label are selected.
-
-**\---mindist <*dist/mm*>**
-
-    Omit source space points closer than this value to the inner skull surface.
-    Any source space points outside the inner skull surface are automatically
-    omitted. The use of this option ensures that numerical inaccuracies
-    for very superficial sources do not cause unexpected effects in
-    the final current estimates. Suitable value for this parameter is
-    of the order of the size of the triangles on the inner skull surface.
-    If you employ the seglab software to create the triangulations, this
-    value should be about equal to the wish for the side length of the
-    triangles.
-
-**\---mindistout <*name*>**
-
-    Specifies a file name to contain the coordinates of source space points
-    omitted due to the ``--mindist`` option.
-
-**\---mri <*name*>**
-
-    The name of the MRI description file containing the MEG/MRI coordinate
-    transformation. This file was saved as part of the alignment procedure
-    outlined in :ref:`CHDBEHDC`. These files typically reside in ``$SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets`` .
-
-**\---trans	 <*name*>**
-
-    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
-    from head to mri coordinates. With ``--trans``, ``--mri`` option is not
-    required.
-
-**\---notrans**
-
-    The MEG/MRI coordinate transformation is taken as the identity transformation, *i.e.*,
-    the two coordinate systems are the same. This option is useful only
-    in special circumstances. If more than one of the ``--mri`` , ``--trans`` ,
-    and ``--notrans`` options are specified, the last one remains
-    in effect.
-
-**\---mricoord**
-
-    Do all computations in the MRI coordinate system. The forward solution
-    matrix is not affected by this option if the source orientations
-    are fixed to be normal to the cortical mantle. If all three source components
-    are included, the forward three source orientations parallel to
-    the coordinate axes is computed. If ``--mricoord`` is present, these
-    axes correspond to MRI coordinate system rather than the default
-    MEG head coordinate system. This option is useful only in special
-    circumstances.
-
-**\---meas <*name*>**
-
-    This file is the measurement fif file or an off-line average file
-    produced thereof. It is recommended that the average file is employed for
-    evoked-response data and the original raw data file otherwise. This
-    file provides the MEG sensor locations and orientations as well as
-    EEG electrode locations as well as the coordinate transformation between
-    the MEG device coordinates and MEG head-based coordinates.
-
-**\---fwd <*name*>**
-
-    This file will contain the forward solution as well as the coordinate transformations,
-    sensor and electrode location information, and the source space
-    data. A name of the form <*name*>-fwd.fif is
-    recommended.
-
-**\---meg**
-
-    Compute the MEG forward solution.
-
-**\---eeg**
-
-    Compute the EEG forward solution.
-
-**\---grad**
-
-    Include the derivatives of the fields with respect to the dipole
-    position coordinates to the output, see :ref:`BJEFEJJG`.
+in :ref:`mne_forward_solution` only.
+Please refer to :ref:`BABCHEJD` for information on :ref:`mne_do_forward_solution`.
+See :ref:`mne_forward_solution` for command-line options.
 
 Implementation of software gradient compensation
 ================================================
@@ -1305,33 +883,7 @@ Purpose
 
 One possibility to make a grand average over several runs
 of an experiment is to average the data across runs and average the
-forward solutions accordingly. For this purpose, mne_average_forward_solutions computes a
+forward solutions accordingly. For this purpose, :ref:`mne_average_forward_solutions` computes a
 weighted average of several forward solutions. The program averages both
 MEG and EEG forward solutions. Usually the EEG forward solution is
 identical across runs because the electrode locations do not change.
-
-Command line options
-====================
-
-mne_average_forward_solutions accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fwd <*name*> :[ <*weight*> ]**
-
-    Specifies a forward solution to include. If no weight is specified,
-    1.0 is assumed. In the averaging process the weights are divided
-    by their sum. For example, if two forward solutions are averaged
-    and their specified weights are 2 and 3, the average is formed with
-    a weight of 2/5 for the first solution and 3/5 for the second one.
-
-**\---out <*name*>**
-
-    Specifies the output file which will contain the averaged forward solution.
diff --git a/doc/manual/source_localization/inverse.rst b/doc/manual/source_localization/inverse.rst
new file mode 100644
index 0000000..a13e55b
--- /dev/null
+++ b/doc/manual/source_localization/inverse.rst
@@ -0,0 +1,520 @@
+
+
+.. _ch_mne:
+
+=====================
+The current estimates
+=====================
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+Overview
+########
+
+This chapter describes the computation of the minimum-norm
+estimates. This is accomplished with two programs: *mne_inverse_operator* and *mne_make_movie*.
+The chapter starts with a mathematical description of the method,
+followed by a description of the two software modules. The interactive
+program for inspecting data and inverse solutions, mne_analyze,
+is covered in :ref:`ch_interactive_analysis`.
+
+.. _CBBDJFBJ:
+
+Minimum-norm estimates
+######################
+
+This section describes the mathematical details of the calculation
+of minimum-norm estimates. In the Bayesian sense, the ensuing current
+distribution is the maximum a posteriori (MAP) estimate under the
+following assumptions:
+
+- The viable locations of the currents
+  are constrained to the cortex. Optionally, the current orientations
+  can be fixed to be normal to the cortical mantle.
+
+- The amplitudes of the currents have a Gaussian prior distribution
+  with a known source covariance matrix.
+
+- The measured data contain additive noise with a Gaussian distribution with
+  a known covariance matrix. The noise is not correlated over time.
+
+The linear inverse operator
+===========================
+
+The measured data in the source estimation procedure consists
+of MEG and EEG data, recorded on a total of :math:`N` channels. The task
+is to estimate a total of :math:`M` strengths of sources located on the
+cortical mantle. If the number of source locations is :math:`P`, then :math:`M = P` for
+fixed-orientation sources and :math:`M = 3P` if the source orientations
+are unconstrained. The regularized linear inverse operator following
+from the Bayesian approach is given by the :math:`M \times N` matrix
+
+.. math::    M = R' G^T (G R' G^T + C)^{-1}\ ,
+
+where :math:`G` is the gain matrix relating the source strengths
+to the measured MEG/EEG data, :math:`C` is the data noise-covariance matrix,
+and :math:`R'` is the source covariance matrix.
+The dimensions of these matrices are :math:`N \times M`, :math:`N \times N`,
+and :math:`M \times M`, respectively. The :math:`M \times 1` source-strength
+vector is obtained by multiplying the :math:`N \times 1` data
+vector by :math:`M`.
+
+The expected value of the current amplitudes at time *t* is
+then given by :math:`\hat{j}(t) = Mx(t)`, where :math:`x(t)` is
+a vector containing the measured MEG and EEG data values at time *t*.
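+
+As a concrete illustration, the operator can be formed directly with
+NumPy (a sketch with random stand-ins for :math:`G`, :math:`R'`,
+and :math:`C`; the actual MNE programs use the more stable
+decomposition approach described below):
+
+.. code-block:: python
+
+    import numpy as np
+
+    N, M_src = 60, 150                  # channels, source components
+    rng = np.random.RandomState(0)
+    G = rng.randn(N, M_src)             # gain matrix, N x M
+    R = np.eye(M_src)                   # source covariance R'
+    C = np.eye(N)                       # noise covariance
+
+    # M = R' G^T (G R' G^T + C)^{-1}
+    M = np.dot(np.dot(R, G.T),
+               np.linalg.inv(np.dot(np.dot(G, R), G.T) + C))
+
+    x = rng.randn(N)                    # measured data at time t
+    j_hat = np.dot(M, x)                # expected current amplitudes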
+
+.. _CBBHAAJJ:
+
+Regularization
+==============
+
+The a priori variance of the currents is, in practice, unknown.
+We can express this by writing :math:`R' = R/ \lambda^2`,
+which yields the inverse operator
+
+.. math::    M = R G^T (G R G^T + \lambda^2 C)^{-1}\ ,
+
+where the unknown current amplitude is now interpreted in
+terms of the regularization parameter :math:`\lambda^2`.
+Small :math:`\lambda^2` corresponds to large current amplitudes
+and complex estimated current patterns, while a large :math:`\lambda^2` means the
+amplitude of the current is limited and a simpler, smoother current
+estimate is obtained.
+
+We can also arrive at the regularized linear inverse operator
+by minimizing the cost function
+
+.. math::    S = \tilde{e}^T \tilde{e} + \lambda^2 j^T R^{-1} j\ ,
+
+where the first term consists of the difference between the
+whitened measured data (see :ref:`CHDDHAGE`) and those predicted
+by the model while the second term is a weighted-norm of the current
+estimate. It is seen that, with increasing :math:`\lambda^2`,
+the source term receives more weight and a larger discrepancy between
+the measured and predicted data is tolerated.
+
+.. _CHDDHAGE:
+
+Whitening and scaling
+=====================
+
+The MNE software employs data whitening so that a 'whitened' inverse operator
+assumes the form
+
+.. math::    \tilde{M} = R \tilde{G}^T (\tilde{G} R \tilde{G}^T + I)^{-1}\ ,
+
+where :math:`\tilde{G} = C^{-^1/_2}G` is the spatially
+whitened gain matrix. The expected current values are :math:`\hat{j}(t) = \tilde{M} \tilde{x}(t)`,
+where :math:`\tilde{x}(t) = C^{-^1/_2}x(t)` is the whitened measurement
+vector at *t*. The spatial whitening operator
+is obtained with the help of the eigenvalue decomposition :math:`C = U_C \Lambda_C^2 U_C^T` as :math:`C^{-^1/_2} = \Lambda_C^{-1} U_C^T`.
+In the MNE software the noise-covariance matrix is stored as the
+one applying to raw data. To reflect the decrease of noise due to
+averaging, this matrix, :math:`C_0`, is scaled
+by the number of averages, :math:`L`, *i.e.*, :math:`C = C_0 / L`.
+
+As shown above, regularization of the inverse solution is
+equivalent to a change in the variance of the current amplitudes
+in the Bayesian *a priori* distribution.
+
+A convenient choice for the source-covariance matrix :math:`R` is
+such that :math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1`. With this choice we
+can approximate :math:`\lambda^2 \sim 1/SNR`, where SNR is
+the (power) signal-to-noise ratio of the whitened data.
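+
+A sketch of the whitening and scaling steps, continuing the NumPy
+example above (the eigendecomposition-based whitener and the trace
+normalization follow the formulas of this section; the SNR value is an
+illustrative assumption):
+
+.. code-block:: python
+
+    # Spatial whitener from C = U_C Lambda_C^2 U_C^T
+    eigval, U_C = np.linalg.eigh(C)
+    C_inv_sqrt = np.dot(np.diag(1.0 / np.sqrt(eigval)), U_C.T)   # C^{-1/2}
+    G_tilde = np.dot(C_inv_sqrt, G)                              # whitened gain
+
+    # Scale R so that trace(G~ R G~^T) / trace(I) = 1
+    scale = np.trace(np.dot(np.dot(G_tilde, R), G_tilde.T)) / N
+    R = R / scale
+
+    # lambda^2 ~ 1 / SNR, with SNR the power signal-to-noise ratio;
+    # an amplitude SNR of 3 is a common illustrative choice
+    lambda2 = 1.0 / 3.0 ** 2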
+
+.. note:: The definition of the signal-to-noise ratio / :math:`\lambda^2` relationship given above works nicely for the whitened forward solution. In the un-whitened case scaling with the trace ratio :math:`\text{trace}(GRG^T) / \text{trace}(C)` does not make sense, since the diagonal elements summed have, in general, different units of measure. For example, the MEG data are expressed in T or T/m whereas the unit of EEG is Volts.
+
+.. _CBBHEGAB:
+
+Regularization of the noise-covariance matrix
+=============================================
+
+Since only a finite amount of data is usually available to compute
+an estimate of the noise-covariance matrix :math:`C`,
+the smallest eigenvalues of its estimate are usually inaccurate
+and smaller than the true eigenvalues. Depending on the seriousness
+of this problem, the following quantities can be affected:
+
+- The model data predicted by the current
+  estimate,
+
+- Estimates of signal-to-noise ratios, which lead to estimates
+  of the required regularization, see :ref:`CBBHAAJJ`,
+
+- The estimated current values, and
+
+- The noise-normalized estimates, see :ref:`CBBEAICH`.
+
+Fortunately, the latter two are the least likely to be affected,
+thanks to the regularization of the estimates. However, in some cases, especially
+the EEG part of the noise-covariance matrix estimate can be deficient, *i.e.*,
+it may possess very small eigenvalues and thus regularization of
+the noise-covariance matrix is advisable.
+
+The MNE software accomplishes the regularization by replacing
+a noise-covariance matrix estimate :math:`C` with
+
+.. math::    C' = C + \sum_k {\varepsilon_k \bar{\sigma_k}^2 I^{(k)}}\ ,
+
+where the index :math:`k` goes across
+the different channel groups (MEG planar gradiometers, MEG axial
+gradiometers and magnetometers, and EEG), :math:`\varepsilon_k` are
+the corresponding regularization factors, :math:`\bar{\sigma_k}` are
+the average variances across the channel groups, and :math:`I^{(k)}` are
+diagonal matrices containing ones at the positions corresponding
+to the channels contained in each channel group. The values :math:`\varepsilon_k` can
+be adjusted with the regularization options ``--magreg``, ``--gradreg``,
+and ``--eegreg`` specified at the time of the inverse operator
+decomposition, see :ref:`CBBDDBGF`. The convenience script mne_do_inverse_operator has
+the ``--magreg`` and ``--gradreg`` options combined into
+a single option, ``--megreg``, see :ref:`CIHCFJEI`.
+Suggested range of values for :math:`\varepsilon_k` is :math:`0.05 \dotso 0.2`.
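+
+A minimal sketch of this regularization (the channel-group indices and
+:math:`\varepsilon_k` values are illustrative, not the defaults of the
+MNE programs):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def regularize_cov(C, groups, epsilons):
+        """C' = C + sum_k eps_k * avg-variance_k * I^(k) (sketch)."""
+        C_reg = C.copy()
+        for name, idx in groups.items():
+            avg_var = C[idx, idx].mean()       # average variance in the group
+            C_reg[idx, idx] += epsilons[name] * avg_var
+        return C_reg
+
+    rng = np.random.RandomState(0)
+    A = rng.randn(306, 1000)
+    C = np.dot(A, A.T) / 1000.                 # toy noise-covariance estimate
+    groups = dict(grad=np.arange(0, 204), mag=np.arange(204, 306))
+    C_reg = regularize_cov(C, groups, dict(grad=0.1, mag=0.1))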
+
+.. _CHDBEHBC:
+
+Computation of the solution
+===========================
+
+The most straightforward approach to calculate the MNE is
+to employ the expression for the original or whitened inverse operator
+directly. However, for computational convenience we prefer to take
+another route, which employs the singular-value decomposition (SVD)
+of the matrix
+
+.. math::    A = \tilde{G} R^{^1/_2} = U \Lambda V^T
+
+where the superscript :math:`^1/_2` indicates a
+square root of :math:`R`. For a diagonal matrix,
+one simply takes the square root of :math:`R` while
+in the more general case one can use the Cholesky factorization :math:`R = R_C R_C^T` and
+thus :math:`R^{^1/_2} = R_C`.
+
+With the above SVD it is easy to show that
+
+.. math::    \tilde{M} = R^{^1/_2} V \Gamma U^T
+
+where the elements of the diagonal matrix :math:`\Gamma` are
+
+.. math::    \gamma_k = \frac{1}{\lambda_k} \frac{\lambda_k^2}{\lambda_k^2 + \lambda^2}\ .
+
+With :math:`w(t) = U^T C^{-^1/_2} x(t)` the expression for
+the expected current is
+
+.. math::    \hat{j}(t) = R_C V \Gamma w(t) = \sum_k {\bar{v_k} \gamma_k w_k(t)}\ ,
+
+where :math:`\bar{v_k} = R_C v_k`, :math:`v_k` being
+the :math:`k` th column of :math:`V`. It is thus seen that the current estimate is
+a weighted sum of the 'modified' eigenleads :math:`\bar{v_k}`.
+
+It is easy to see that :math:`w(t) \propto \sqrt{L}`.
+To maintain the relation :math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1` when :math:`L` changes
+we must have :math:`R \propto 1/L`. With this approach, :math:`\lambda_k` is
+independent of :math:`L` and, for fixed :math:`\lambda`,
+we see directly that :math:`\hat{j}(t)` is independent
+of :math:`L`.
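+
+The SVD route, continuing the whitened NumPy sketch above (a direct
+transcription of the formulas of this section, not the internal MNE
+implementation):
+
+.. code-block:: python
+
+    # A = G~ R^{1/2} = U Lambda V^T, with R^{1/2} = R_C from Cholesky
+    R_sqrt = np.linalg.cholesky(R)
+    A = np.dot(G_tilde, R_sqrt)
+    U_A, lam, Vt = np.linalg.svd(A, full_matrices=False)
+
+    # gamma_k = (1 / lambda_k) * lambda_k^2 / (lambda_k^2 + lambda^2)
+    gamma = lam / (lam ** 2 + lambda2)
+
+    # Whitened inverse operator M~ = R^{1/2} V Gamma U^T
+    M_tilde = np.dot(np.dot(R_sqrt, Vt.T) * gamma, U_A.T)
+
+    # Expected current from the whitened data x~(t) = C^{-1/2} x(t)
+    x_w = np.dot(C_inv_sqrt, x)
+    j_hat = np.dot(M_tilde, x_w)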
+
+.. _CBBEAICH:
+
+Noise normalization
+===================
+
+The noise-normalized linear estimates introduced by Dale
+et al. require division of the expected current amplitude by its
+variance. Noise normalization serves three purposes:
+
+- It converts the expected current value
+  into a dimensionless statistical test variable. Thus the resulting
+  time and location dependent values are often referred to as dynamic
+  statistical parameter maps (dSPM).
+
+- It reduces the location bias of the estimates. In particular,
+  the tendency of the MNE to prefer superficial currents is eliminated.
+
+- The width of the point-spread function becomes less dependent
+  on the source location on the cortical mantle. The point-spread
+  is defined as the MNE resulting from the signals coming from a point
+  current source (a current dipole) located at a certain point on
+  the cortex.
+
+In practice, noise normalization requires the computation
+of the diagonal elements of the matrix
+
+.. math::    M C M^T = \tilde{M} \tilde{M}^T\ .
+
+With the help of the singular-value decomposition approach we
+see directly that
+
+.. math::    \tilde{M} \tilde{M}^T = \bar{V} \Gamma^2 \bar{V}^T\ .
+
+Under the conditions expressed at the end of :ref:`CHDBEHBC`, it follows that the *t*-statistic values associated
+with fixed-orientation sources are thus proportional to :math:`\sqrt{L}` while
+the *F*-statistic employed with free-orientation sources is proportional
+to :math:`L`, correspondingly.
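+
+Continuing the SVD sketch, the diagonal of :math:`\tilde{M} \tilde{M}^T`
+and a dSPM-style normalization can be obtained as follows (a sketch of
+the formulas above, not the implementation in the MNE programs):
+
+.. code-block:: python
+
+    # Modified eigenleads V_bar = R^{1/2} V; diag(V_bar Gamma^2 V_bar^T)
+    V_bar = np.dot(R_sqrt, Vt.T)
+    noise_var = np.sum((V_bar * gamma) ** 2, axis=1)
+
+    # Noise-normalized (dSPM-style) estimate
+    j_dspm = j_hat / np.sqrt(noise_var)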
+
+.. note:: A section discussing statistical considerations related to the noise normalization procedure will be added to this manual in one of the subsequent releases.
+
+.. note:: The MNE software usually computes the square roots of the F-statistic to be displayed on the inflated cortical surfaces. These are also proportional to :math:`\sqrt{L}`.
+
+.. _CHDCACDC:
+
+Predicted data
+==============
+
+Under noiseless conditions the SNR is infinite and thus leads
+to :math:`\lambda^2 = 0` and the minimum-norm estimate
+explains the measured data perfectly. Under realistic conditions,
+however, :math:`\lambda^2 > 0` and there is a misfit
+between measured data and those predicted by the MNE. Comparison
+of the predicted data, here denoted by :math:`\hat{x}(t)`,
+and the measured data can give valuable insight into the correctness of
+the regularization applied.
+
+In the SVD approach we easily find
+
+.. math::    \hat{x}(t) = G \hat{j}(t) = C^{^1/_2} U \Pi w(t)\ ,
+
+where the diagonal matrix :math:`\Pi` has
+elements :math:`\pi_k = \lambda_k \gamma_k`. The predicted data is
+thus expressed as the weighted sum of the 'recolored eigenfields' in :math:`C^{^1/_2} U`.
+
+.. _CBBDBHDI:
+
+Cortical patch statistics
+=========================
+
+If the ``--cps`` option was used in source space
+creation (see :ref:`CIHCHDAE`) or if mne_add_patch_info described
+in :ref:`mne_add_patch_info` was run manually, the source space file
+will contain for each vertex of the cortical surface the information
+about the source space point closest to it as well as the distance
+from the vertex to this source space point. The vertices for which
+a given source space point is the nearest one define the cortical
+patch associated with the source space point. Once these data
+are available, it is straightforward to compute the following cortical
+patch statistics (CPS) for each source location :math:`d`:
+
+- The average of the normals at the
+  vertices in a patch, :math:`\bar{n_d}`,
+
+- The areas of the patches, :math:`A_d`,
+  and
+
+- The average deviation of the vertex normals in a patch from
+  their average, :math:`\sigma_d`, given in degrees.
+
+The orientation constraints
+===========================
+
+The principal sources of MEG and EEG signals are generally
+believed to be postsynaptic currents in the cortical pyramidal neurons.
+Since the net primary current associated with these microscopic
+events is oriented normal to the cortical mantle, it is reasonable
+to use the cortical normal orientation as a constraint in source
+estimation. In addition to allowing completely free source orientations,
+the MNE software implements three orientation constraints based
+on the surface normal data:
+
+- Source orientation can be rigidly fixed
+  to the surface normal direction (the ``--fixed`` option).
+  If cortical patch statistics are available the average normal over
+  each patch, :math:`\bar{n_d}`, are used to define
+  the source orientation. Otherwise, the vertex normal at the source
+  space location is employed.
+
+- A *location independent or fixed loose orientation
+  constraint* (fLOC) can be employed (the ``--loose`` option).
+  In this approach, a source coordinate system based on the local
+  surface orientation at the source location is employed. By default,
+  the three columns of the gain matrix G, associated with a given
+  source location, are the fields of unit dipoles pointing to the
+  directions of the x, y, and z axis of the coordinate system employed
+  in the forward calculation (usually the MEG head coordinate frame).
+  For fLOC the orientation is changed so that the first two source
+  components lie in the plane normal to the surface normal at the source
+  location and the third component is aligned with it. Thereafter, the
+  variance of the source components tangential to the cortical surface is
+  reduced by a factor defined by the ``--loose`` option.
+
+- A *variable loose orientation constraint* (vLOC)
+  can be employed (the ``--loosevar`` option). This is similar
+  to fLOC except that the value given with the ``--loosevar`` option
+  will be multiplied by :math:`\sigma_d`, defined above.
+
+.. _CBBDFJIE:
+
+Depth weighting
+===============
+
+The minimum-norm estimates have a bias towards superficial
+currents. This tendency can be alleviated by adjusting the source
+covariance matrix :math:`R` to favor deeper source locations. In the depth
+weighting scheme employed in MNE analyze, the elements of :math:`R` corresponding
+to the :math:`p` th source location are
+scaled by a factor
+
+.. math::    f_p = (g_{1p}^T g_{1p} + g_{2p}^T g_{2p} + g_{3p}^T g_{3p})^{-\gamma}\ ,
+
+where :math:`g_{1p}`, :math:`g_{2p}`, and :math:`g_{3p}` are the three columns
+of :math:`G` corresponding to source location :math:`p` and :math:`\gamma` is
+the order of the depth weighting, specified with the ``--weightexp`` option
+to mne_inverse_operator. The
+maximal amount of depth weighting can be adjusted with the ``--weightlimit`` option.
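+
+A sketch of these weighting factors (``G`` is assumed to hold three
+consecutive columns per source location; the default value of
+:math:`\gamma` is illustrative):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def depth_weights(G, gamma=0.8):
+        """f_p = (g1'g1 + g2'g2 + g3'g3)^(-gamma) for each location p."""
+        n_src = G.shape[1] // 3
+        f = np.empty(n_src)
+        for p in range(n_src):
+            Gp = G[:, 3 * p:3 * p + 3]    # the three columns of location p
+            f[p] = np.sum(Gp ** 2) ** -gamma
+        return f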
+
+.. _CBBDIJHI:
+
+fMRI-guided estimates
+=====================
+
+The fMRI weighting in MNE software means that the source-covariance matrix
+is modified to favor areas of significant fMRI activation. For this purpose,
+the fMRI activation map is thresholded first at the value defined by
+the ``--fmrithresh`` option to mne_do_inverse_operator or mne_inverse_operator.
+Thereafter, the source-covariance matrix values corresponding to
+the sites under the threshold are multiplied by :math:`f_{off}`, set
+by the ``--fmrioff`` option.
+
+It turns out that the fMRI weighting has a strong influence
+on the MNE but the noise-normalized estimates are much less affected
+by it.
+
+.. _CBBDGIAE:
+
+Effective number of averages
+############################
+
+It is often the case that the epoch to be analyzed is a linear
+combination over conditions rather than one of the original averages
+computed. As stated above, the noise-covariance matrix computed
+is originally one corresponding to raw data. Therefore, it has to
+be scaled correctly to correspond to the actual or effective number
+of epochs in the condition to be analyzed. In general, we have
+
+.. math::    C = C_0 / L_{eff}
+
+where :math:`L_{eff}` is the effective
+number of averages. To calculate :math:`L_{eff}` for
+an arbitrary linear combination of conditions
+
+.. math::    y(t) = \sum_{i = 1}^n {w_i x_i(t)}
+
+we make use of the fact that the noise-covariance matrix is
+
+.. math::    C_y = \sum_{i = 1}^n {w_i^2 C_{x_i}} = C_0 \sum_{i = 1}^n {w_i^2 / L_i}
+
+which leads to
+
+.. math::    1 / L_{eff} = \sum_{i = 1}^n {w_i^2 / L_i}
+
+An important special case of the above is a weighted average,
+where
+
+.. math::    w_i = L_i / \sum_{i = 1}^n {L_i}
+
+and, therefore
+
+.. math::    L_{eff} = \sum_{i = 1}^n {L_i}
+
+Instead of a weighted average, one often computes a weighted
+sum, the simplest case being a difference or sum of two categories.
+For a difference :math:`w_1 = 1` and :math:`w_2 = -1` and
+thus
+
+.. math::    1 / L_{eff} = 1 / L_1 + 1 / L_2
+
+or
+
+.. math::    L_{eff} = \frac{L_1 L_2}{L_1 + L_2}
+
+Interestingly, the same holds for a sum, where :math:`w_1 = w_2 = 1`.
+Generalizing, for any combination of sums and differences, where :math:`w_i = 1` or :math:`w_i = -1`, :math:`i = 1 \dotso n`,
+we have
+
+.. math::    1 / L_{eff} = \sum_{i = 1}^n {1/{L_i}}
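+
+The rule is easy to encode; a small sketch with illustrative epoch
+counts:
+
+.. code-block:: python
+
+    def n_ave_effective(weights, n_aves):
+        """1 / L_eff = sum_i w_i^2 / L_i (the general formula above)."""
+        return 1.0 / sum(w ** 2 / float(L) for w, L in zip(weights, n_aves))
+
+    # Difference of two conditions with 100 and 50 epochs:
+    print(n_ave_effective([1, -1], [100, 50]))   # 33.33... = 100 * 50 / 150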
+
+.. _CBBDDBGF:
+
+Inverse-operator decomposition
+##############################
+
+The program :ref:`mne_inverse_operator` calculates
+the decomposition :math:`A = \tilde{G} R^C = U \Lambda \bar{V^T}`,
+described in :ref:`CHDBEHBC`. It is normally invoked from the convenience
+script :ref:`mne_do_inverse_operator`. 
+
+
+.. _CBBECEDE:
+
+Producing movies and snapshots
+##############################
+
+:ref:`mne_make_movie` is a program
+for producing movies and snapshot graphics frames without any graphics
+output to the screen. In addition, :ref:`mne_make_movie` can
+produce stc or w files which contain the numerical current estimate
+data in a simple binary format for postprocessing. These files can
+be displayed in :ref:`mne_analyze`,
+see :ref:`ch_interactive_analysis`, utilized in the cross-subject averaging
+process, see :ref:`ch_morph`, and read into Matlab using the MNE
+Matlab toolbox, see :ref:`ch_matlab`.
+
+
+.. _CBBCGHAH:
+
+Computing inverse from raw and evoked data
+##########################################
+
+The purpose of the utility :ref:`mne_compute_raw_inverse` is
+to compute inverse solutions from either evoked-response or raw
+data at specified ROIs (labels) and to save the results in a fif
+file which can be viewed with :ref:`mne_browse_raw`,
+read to Matlab directly using the MNE Matlab Toolbox, see :ref:`ch_matlab`,
+or converted to Matlab format using either :ref:`mne_convert_mne_data`,
+:ref:`mne_raw2mat`, or :ref:`mne_epochs2mat`. See
+:ref:`mne_compute_raw_inverse` for command-line options.
+
+.. _CBBHJDAI:
+
+Implementation details
+======================
+
+The fif files output from mne_compute_raw_inverse have
+various fields of the channel information set to facilitate interpretation
+by postprocessing software as follows:
+
+**channel name**
+
+    Will be set to J[xyz] <*number*>,
+    where the source component is indicated by the coordinate axis name
+    and number is the vertex number, starting from zero, in the complete
+    triangulation of the hemisphere in question.
+
+**logical channel number**
+
+    Will be set to the vertex number, starting from zero, in the
+    complete triangulation of the hemisphere in question.
+
+**sensor location**
+
+    The location of the vertex in head coordinates or in MRI coordinates,
+    determined by the ``--mricoord`` flag.
+
+**sensor orientation**
+
+    The *x*-direction unit vector will point to the
+    direction of the current. Other unit vectors are set to zero. Again,
+    the coordinate system in which the orientation is expressed depends
+    on the ``--mricoord`` flag.
+
+The ``--align_z`` flag tries to align the signs
+of the signals at different vertices of the label. For this purpose,
+the surface normals within the label are collected into a :math:`n_{vert} \times 3` matrix.
+The preferred orientation will be taken as the first right singular
+vector of this matrix, corresponding to its largest singular value.
+If the dot product of the surface normal of a vertex with this preferred
+orientation is negative,
+the sign of the estimates at this vertex is inverted. The inversion
+is reflected in the current direction vector listed in the channel
+information, see above.
+
+.. note:: The raw data files output by :ref:`mne_compute_raw_inverse` can be converted to mat files with :ref:`mne_raw2mat`. Alternatively, the files can be read directly from Matlab using the routines in the MNE Matlab toolbox, see :ref:`ch_matlab`. The evoked data output can be easily read directly from Matlab using the fiff_load_evoked routine in the MNE Matlab toolbox. Both raw data and evoked output files can be loaded into :ref:`mne_browse_raw`, see :ref:`ch_browse`.
diff --git a/doc/manual/source_localization/morph.rst b/doc/manual/source_localization/morph.rst
new file mode 100644
index 0000000..9cf7f76
--- /dev/null
+++ b/doc/manual/source_localization/morph.rst
@@ -0,0 +1,141 @@
+
+
+.. _ch_morph:
+
+======================
+Morphing and averaging
+======================
+
+.. contents:: Contents
+   :local:
+   :depth: 2
+
+Overview
+########
+
+The spherical morphing of the surfaces accomplished by FreeSurfer can be
+employed to bring data from different subjects into a common anatomical
+frame. This chapter describes utilities which make use of the spherical morphing
+procedure. mne_morph_labels morphs
+label files between subjects, allowing the definition of labels in
+one brain and transforming them to anatomically analogous labels
+in another. mne_average_estimates offers
+the capability to compute averages of data computed with the MNE software
+across subjects.
+
+.. _CHDJDHII:
+
+The morphing maps
+#################
+
+The MNE software accomplishes morphing with the help of morphing
+maps which can be either computed on demand or precomputed using mne_make_morph_maps,
+see :ref:`CHDBBHDH`. The morphing is performed with the help
+of the registered spherical surfaces (``lh.sphere.reg`` and ``rh.sphere.reg``)
+which must be produced in FreeSurfer.
+A morphing map is a linear mapping from cortical surface values
+in subject A (:math:`x^{(A)}`) to those in another
+subject B (:math:`x^{(B)}`)
+
+.. math::    x^{(B)} = M^{(AB)} x^{(A)}\ ,
+
+where :math:`M^{(AB)}` is a sparse matrix
+with at most three nonzero elements on each row. These elements
+are determined as follows. First, using the aligned spherical surfaces,
+for each vertex :math:`x_j^{(B)}`, find the triangle :math:`T_j^{(A)}` on the
+spherical surface of subject A which contains the location :math:`x_j^{(B)}`.
+Next, find the numbers of the vertices of this triangle and set
+the corresponding elements on the *j* th row of :math:`M^{(AB)}` so that :math:`x_j^{(B)}` will
+be a linear interpolation between the triangle vertex values reflecting
+the location :math:`x_j^{(B)}` within the triangle :math:`T_j^{(A)}`.
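+
+A toy sketch of such a map and its application (the weights are made-up
+barycentric coordinates; real maps are stored in the -morph.fif files
+described at the end of this chapter):
+
+.. code-block:: python
+
+    import numpy as np
+    from scipy import sparse
+
+    # Four vertices in subject B interpolated from five vertices in
+    # subject A; each row holds at most three weights that sum to one
+    rows = [0, 0, 0, 1, 1, 2, 3, 3, 3]
+    cols = [0, 1, 2, 1, 3, 4, 2, 3, 4]
+    w = [.2, .5, .3, .6, .4, 1., .1, .4, .5]
+    M_AB = sparse.csr_matrix((w, (rows, cols)), shape=(4, 5))
+
+    x_A = np.array([1., 2., 3., 4., 5.])   # surface values in subject A
+    x_B = M_AB.dot(x_A)                    # morphed values in subject B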
+
+It follows from the above definition that in general
+
+.. math::    M^{(AB)} \neq (M^{(BA)})^{-1}\ ,
+
+*i.e.*,
+
+.. math::    x^{(A)} \neq M^{(BA)} M^{(AB)} x^{(A)}\ ,
+
+even if
+
+.. math::    x^{(A)} \approx M^{(BA)} M^{(AB)} x^{(A)}\ ,
+
+*i.e.*, the mapping is *almost* a
+bijection.
+
+.. _CHDEBAHH:
+
+About smoothing
+###############
+
+The current estimates are normally defined only on a decimated
+grid which is a sparse subset of the vertices in the triangular
+tessellation of the cortical surface. Therefore, any sparse set
+of values is distributed to neighboring vertices to make the visualized
+results easily understandable. This procedure has been traditionally
+called smoothing but a more appropriate name
+might be smudging or blurring in
+accordance with similar operations in image processing programs.
+
+In MNE software terms, smoothing of the vertex data is an
+iterative procedure, which produces a blurred image :math:`x^{(N)}` from
+the original sparse image :math:`x^{(0)}` by applying
+in each iteration step a sparse blurring matrix:
+
+.. math::    x^{(p)} = S^{(p)} x^{(p - 1)}\ .
+
+On each row :math:`j` of the matrix :math:`S^{(p)}` there
+are :math:`N_j^{(p - 1)}` nonzero entries whose values
+equal :math:`1/N_j^{(p - 1)}`. Here :math:`N_j^{(p - 1)}` is
+the number of immediate neighbors of vertex :math:`j` which
+had non-zero values at iteration step :math:`p - 1`.
+Matrix :math:`S^{(p)}` thus assigns the average
+of the non-zero neighbors as the new value for vertex :math:`j`.
+One important feature of this procedure is that it tends to preserve
+the amplitudes while blurring the surface image.
+
+Once the indices of the non-zero vertices in :math:`x^{(0)}` and
+the topology of the triangulation are fixed, the matrices :math:`S^{(p)}` are
+fixed and independent of the data. Therefore, it would be in principle
+possible to construct a composite blurring matrix
+
+.. math::    S^{(N)} = \prod_{p = 1}^N {S^{(p)}}\ .
+
+However, it turns out to be computationally more efficient
+to do the blurring iteratively. The above formula for :math:`S^{(N)}` also
+shows that the smudging (smoothing) operation is linear.
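+
+A sketch of a single smudging step on a toy neighbor structure
+(``neighbors[j]`` lists the immediate neighbors of vertex ``j``; as an
+assumption of this sketch, a vertex is counted among its own neighbors
+so that already-set values are preserved):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def smooth_step(x, neighbors):
+        """Assign to each vertex the average of its non-zero neighbors."""
+        x_new = np.zeros_like(x)
+        for j, nbrs in enumerate(neighbors):
+            vals = [x[k] for k in [j] + list(nbrs) if x[k] != 0]
+            if vals:
+                x_new[j] = np.mean(vals)
+        return x_new
+
+    x = np.array([0., 4., 0., 0.])             # sparse image x^(0)
+    neighbors = [[1], [0, 2], [1, 3], [2]]     # a chain of four vertices
+    print(smooth_step(x, neighbors))           # [ 4.  4.  4.  0.]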
+
+.. _CHDBBHDH:
+
+Precomputing the morphing maps
+##############################
+
+The utility mne_make_morph_maps was
+created to assist mne_analyze and mne_make_movie in
+morphing. Since the morphing maps described above take a while to
+compute, it is beneficial to construct all necessary maps in advance
+before using mne_make_movie.
+The precomputed morphing maps are located in ``$SUBJECTS_DIR/morph-maps``. mne_make_morph_maps creates
+this directory automatically if it does not exist. If this directory
+exists when mne_analyze or mne_make_movie is run
+and morphing is requested, the software first looks for already
+existing morphing maps there. Also, if mne_analyze or mne_make_movie have
+to recompute any morphing maps, they will be saved to ``$SUBJECTS_DIR/morph-maps`` if
+this directory exists.
+
+The names of the files in ``$SUBJECTS_DIR/morph-maps`` are
+of the form:
+
+<*A*>-<*B*>-``morph.fif``,
+
+where <*A*> and <*B*> are
+names of subjects. These files contain the maps for both hemispheres,
+and in both directions, *i.e.*, both :math:`M^{(AB)}` and :math:`M^{(BA)}`, as
+defined above. Thus the files <*A*>-<*B*>-``morph.fif`` or <*B*>-<*A*>-``morph.fif`` are
+functionally equivalent. The name of the file produced by mne_analyze or mne_make_movie depends
+on the role of <*A*> and <*B*> in
+the analysis.
+
+If you choose to compute the morphing maps in batch in advance,
+use :ref:`mne_make_morph_maps`.
diff --git a/doc/manual/statistics.rst b/doc/manual/statistics.rst
new file mode 100644
index 0000000..3692d69
--- /dev/null
+++ b/doc/manual/statistics.rst
@@ -0,0 +1,100 @@
+==========
+Statistics
+==========
+
+MNE-Python provides different parametric and
+non-parametric statistics in :mod:`mne.stats` which are specifically designed
+for analyzing mass-univariate hypotheses on neuroimaging data.
+
+
+Parametric statistics
+---------------------
+
+Models
+^^^^^^
+
+- :func:`mne.stats.linear_regression` computes ordinary least squares
+  regressions on multiple targets, e.g., sensors, time points across trials
+  (samples). For each regressor it returns the beta values, t-statistics, and
+  uncorrected significance values. While it can be used as a test, it is
+  particularly useful to compute weighted averages.
+
+- :func:`mne.stats.f_mway_rm` computes a generalized M-way repeated
+  measures ANOVA for balanced designs. It returns mass-univariate F-statistics
+  and p-values. The associated helper function
+  :func:`mne.stats.f_threshold_mway_rm` determines the F-threshold
+  at a given significance level and set of degrees of freedom. Note that
+  this set of functions was previously called `mne.stats.f_twoway_rm` and
+  `mne.stats.f_threshold_twoway_rm`, respectively, only supporting 2-way
+  factorial designs.
+
+- :func:`mne.stats.ttest_1samp_no_p` is an optimized version of the one sample
+  t-test provided by scipy. It is used by default for contrast enhancement in
+  :func:`mne.stats.permutation_cluster_1samp_test` and
+  :func:`mne.stats.spatio_temporal_permutation_cluster_1samp_test`.
+
+- :func:`mne.stats.parametric.f_oneway` is an optimized version of the F-test
+  for independent samples provided by scipy.
+  It can be used in the context of non-parametric permutation tests to
+  compute various F-contrasts. It is used by default for contrast enhancement in
+  :func:`mne.stats.spatio_temporal_permutation_cluster_test` and
+  :func:`mne.stats.permutation_cluster_test`.
+
+
+Multiple comparisons
+^^^^^^^^^^^^^^^^^^^^
+
+MEG and EEG analyses typically involve multiple measurements
+(sensors, time points) for each sample. In a mass-univariate analysis, where
+statistical models are fitted for each of these observations, a multiple comparison problem
+(MCPP) occurs. MNE-Python provides the following functions to control for
+multiple comparisons:
+
+- :func:`mne.stats.bonferroni_correction` returns a boolean mask of rejection
+  decisions and the corrected p-values. The Bonferroni correction reflects the
+  most conservative choice and corrects for the MCPP by multiplying the
+  p-values by the number of observations.
+
+- :func:`mne.stats.fdr_correction` implements false discovery rate (FDR) correction and
+  also returns a boolean mask of rejection decisions and the corrected p-values.
+
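+A short usage sketch of the two corrections above, on synthetic
+p-values (the data are random; the alpha level and the FDR method are
+the illustrative defaults):
+
+.. code-block:: python
+
+    import numpy as np
+    from mne.stats import bonferroni_correction, fdr_correction
+
+    rng = np.random.RandomState(0)
+    pvals = rng.uniform(size=306)        # one p-value per channel (toy data)
+
+    reject_bonf, pvals_bonf = bonferroni_correction(pvals, alpha=0.05)
+    reject_fdr, pvals_fdr = fdr_correction(pvals, alpha=0.05, method='indep')
+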
+More flexible handling of the MCPP can be achieved by non-parametric statistics.
+
+
+Non-parametric statistics
+-------------------------
+
+Permutation clustering
+^^^^^^^^^^^^^^^^^^^^^^
+
+As MEG and EEG data are subject to considerable spatiotemporal correlation,
+the assumption of independence between observations is hard to justify.
+As a consequence the MCPP is overestimated when employing parametric
+mass-univariate statistics. A flexible alternative is given by non-parametric
+permutation clustering statistics which implement spatiotemporal priors
+and typically allow for clusterwise inference.
+These tests can be applied over a wide range of situations including single subject and group analyses
+in time, space and frequency. The only requirement is that the scientific hypothesis can be mapped
+onto an exchangeability null hypothesis in which two or more conditions can be compared and exchanged
+across permutations to generate an empirical distribution.
+
+The clustering permutation API in MNE-Python is grouped according to different contrasts of interest
+and clustering connectivity prior, i.e., assumptions about the grouping and neighborhood of the observations.
+
+- :func:`mne.stats.permutation_cluster_1samp_test` supports paired contrasts with spatial prior.
+
+- :func:`mne.stats.permutation_cluster_test` supports F-contrasts with spatial prior.
+
+- :func:`mne.stats.spatio_temporal_permutation_cluster_1samp_test` supports paired contrasts without spatial prior.
+
+- :func:`mne.stats.spatio_temporal_permutation_cluster_test` supports F-contrasts without spatial prior.
+
+Using the TFCE option, observation-wise instead of cluster-wise hypothesis testing can be performed.
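+
+As a minimal sketch, a one-sample (paired) cluster test on hypothetical
+random data, 15 observations by 40 time points, could look like::
+
+    import numpy as np
+    from mne.stats import permutation_cluster_1samp_test
+
+    X = np.random.randn(15, 40)  # hypothetical: observations x time points
+    T_obs, clusters, cluster_p_vals, H0 = permutation_cluster_1samp_test(
+        X, threshold=None, n_permutations=1024)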
+
+
+.. note:: The permutation clustering functions are not simply thresholded parametric tests.
+    Although F-tests and t-tests are used internally for contrast enhancement, the actual
+    test statistic is the cluster size.
+
+.. note:: Unless TFCE is used, the hypotheses tested are cluster-wise. This means that no inference is provided
+    for individual time points, sensors, dipole locations, or frequencies within such a cluster.
diff --git a/doc/manual/time_frequency.rst b/doc/manual/time_frequency.rst
new file mode 100644
index 0000000..250c176
--- /dev/null
+++ b/doc/manual/time_frequency.rst
@@ -0,0 +1,34 @@
+====================================
+Spectral and Time-frequency Analysis
+====================================
+
+
+Source Space
+^^^^^^^^^^^^
+
+Currently, MNE-Python provides a set of functions
+for computing spectral analyses in source space.
+Many of these functions return :class:`mne.SourceEstimate` objects or collections thereof.
+
+.. note::
+    The :class:`mne.SourceEstimate` object was initially designed for classical time-domain analyses.
+    In this context, the time axis can actually refer to frequencies. This might be improved
+    in the future.
+
+
+The following functions are based on minimum norm estimates (MNE).
+
+- :func:`mne.minimum_norm.compute_source_psd_epochs` returns single-trial
+  power spectral density (PSD) estimates using multitapers (see the sketch at
+  the end of this section). Here, the time axis actually refers to
+  frequencies, even if labeled as time.
+
+- :func:`mne.minimum_norm.compute_source_psd` returns power spectral density
+  (PSD) estimates from continuous data using FFT. Here, the time axis
+  actually refers to frequencies, even if labeled as time.
+
+- :func:`mne.minimum_norm.source_band_induced_power` returns a collection of
+  time-domain :class:`mne.SourceEstimate` objects, one per frequency band,
+  based on Morlet wavelets.
+
+- :func:`mne.minimum_norm.source_induced_power` returns power and inter-trial
+  coherence (ITC) as raw numpy arrays, based on Morlet wavelets.
+
+Alternatively, the source power spectral density can be estimated using the DICS beamformer,
+see :func:`mne.beamformer.dics_source_power`.
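+
+A minimal sketch for the multitaper case, assuming ``epochs`` and an
+inverse operator file name ``fname_inv`` already exist (both are
+hypothetical here)::
+
+    from mne.minimum_norm import (read_inverse_operator,
+                                  compute_source_psd_epochs)
+
+    inverse_operator = read_inverse_operator(fname_inv)
+    stcs = compute_source_psd_epochs(epochs, inverse_operator,
+                                     lambda2=1. / 9., method='dSPM',
+                                     fmin=4., fmax=40.)
+    # each stc is a SourceEstimate whose "times" are actually frequencies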
+ 
\ No newline at end of file
diff --git a/doc/manual/visualization.rst b/doc/manual/visualization.rst
new file mode 100644
index 0000000..cfefd39
--- /dev/null
+++ b/doc/manual/visualization.rst
@@ -0,0 +1,3 @@
+=============
+Visualization
+=============
diff --git a/doc/mne_cpp.rst b/doc/mne_cpp.rst
new file mode 100644
index 0000000..86b00b1
--- /dev/null
+++ b/doc/mne_cpp.rst
@@ -0,0 +1,18 @@
+.. _mne_cpp:
+
+======================
+MNE with CPP
+======================
+
+MNE-CPP is a cross-platform application and algorithm C++ framework for MEG and EEG data analysis and acquisition. It provides a modular structure with many sub-libraries. These libraries can be easily integrated into your project to, e.g., provide full I/O support for the fiff file format or for files generated by the MNE suite. MNE-CPP comes with its own acquisition software, MNE-X, which can be used to acquire and process data from Elekta Neuromag MEG VectorView, BabyMEG, TMSI EEG, and eegosports systems.
+For further information please visit the MNE-CPP project pages:
+
+  * `Project Page <http://www.tu-ilmenau.de/bmti/forschung/datenanalyse-modellierung-und-inverse-verfahren/mne-cpp/>`_
+  * `Class Reference <http://www2.tu-ilmenau.de/mne-cpp/space/doc/>`_
+  * `GitHub Sources <https://github.com/mne-tools/mne-cpp/>`_
+
+.. raw:: html
+
+    <div>
+        <script type="text/javascript" src="http://www.openhub.net/p/687714/widgets/project_basic_stats.js"></script>
+    </div>
diff --git a/doc/source/python_reference.rst b/doc/python_reference.rst
similarity index 77%
rename from doc/source/python_reference.rst
rename to doc/python_reference.rst
index 5c3f7fe..eda6534 100644
--- a/doc/source/python_reference.rst
+++ b/doc/python_reference.rst
@@ -1,21 +1,17 @@
-=========
-Reference
-=========
+.. _api_reference:
 
-.. automodule:: mne
-   :no-members:
-   :no-inherited-members:
+=============
+API Reference
+=============
 
 This is the classes and functions reference of mne-python. Functions are
-grouped thematically by analysis stage. In addition, all File I/O functions
-are collected in a separate section. Functions and classes that are not below
-a module heading are found in the :py:mod:`mne` namespace.
+grouped thematically by analysis stage. Functions and classes that are not
+below a module heading are found in the :py:mod:`mne` namespace.
 
 
-.. toctree::
-   :maxdepth: 2
-
-   python_reference
+.. contents::
+   :local:
+   :depth: 2
 
 
 Classes
@@ -28,19 +24,26 @@ Classes
    :template: class.rst
 
    io.Raw
-   io.RawFIFF
+   io.RawFIF
    Epochs
    Evoked
+   SourceSpaces
    SourceEstimate
+   VolSourceEstimate
+   MixedSourceEstimate
    Covariance
+   Dipole
    Label
    BiHemiLabel
+   Transform
    preprocessing.ICA
    decoding.CSP
    decoding.Scaler
    decoding.ConcatenateChannels
    decoding.FilterEstimator
    decoding.PSDEstimator
+   decoding.GeneralizationAcrossTime
+   decoding.TimeDecoding
    realtime.RtEpochs
    realtime.RtClient
    realtime.MockRtClient
@@ -103,6 +106,7 @@ Functions:
   read_raw_kit
   read_raw_brainvision
   read_raw_egi
+  read_raw_fif
 
 .. currentmodule:: mne.io.kit
 
@@ -112,12 +116,7 @@ Functions:
   :toctree: generated/
   :template: function.rst
 
-   read_elp
-   read_hsp
    read_mrk
-   write_hsp
-   write_mrk
-
 
 File I/O
 ========
@@ -127,19 +126,21 @@ File I/O
 Functions:
 
 .. autosummary::
-   :toctree: generated/
+   :toctree: generated/
    :template: function.rst
 
    decimate_surface
    get_head_surf
    get_meg_helmet_surf
+   get_volume_labels_from_aseg
    parse_config
    read_labels_from_annot
    read_bem_solution
    read_bem_surfaces
    read_cov
-   read_dip
+   read_dipole
    read_epochs
+   read_epochs_kit
    read_events
    read_evokeds
    read_forward_solution
@@ -154,7 +155,8 @@ Functions:
    read_trans
    save_stc_as_volume
    write_labels_to_annot
-   write_bem_surface
+   write_bem_solution
+   write_bem_surfaces
    write_cov
    write_events
    write_evokeds
@@ -236,6 +238,22 @@ Sample datasets
 
    data_path
 
+:py:mod:`mne.datasets.brainstorm`:
+
+.. automodule:: mne.datasets.brainstorm
+   :no-members:
+   :no-inherited-members:
+
+.. currentmodule:: mne.datasets.brainstorm
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   bst_auditory.data_path
+   bst_resting.data_path
+   bst_raw.data_path
+
 :py:mod:`mne.datasets.megsim`:
 
 .. automodule:: mne.datasets.megsim
@@ -263,6 +281,16 @@ Visualization
 
 .. currentmodule:: mne.viz
 
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   ClickableImage
+
+Functions:
+
 .. autosummary::
    :toctree: generated/
    :template: function.rst
@@ -271,25 +299,34 @@ Visualization
    mne_analyze_colormap
    plot_connectivity_circle
    plot_cov
+   plot_dipole_amplitudes
+   plot_dipole_locations
    plot_drop_log
+   plot_epochs
+   plot_events
    plot_evoked
    plot_evoked_image
    plot_evoked_topomap
    plot_evoked_field
+   plot_evoked_white
    plot_ica_sources
    plot_ica_components
    plot_ica_scores
    plot_ica_overlay
-   plot_image_epochs
+   plot_epochs_image
+   plot_montage
+   plot_projs_topomap
    plot_raw
-   plot_raw_psds
+   plot_raw_psd
+   plot_snr_estimate
    plot_source_estimates
    plot_sparse_source_estimates
+   plot_tfr_topomap
    plot_topo
    plot_topo_image_epochs
-   plot_topo_tfr
    plot_topomap
    compare_fiff
+   add_background_image
 
 .. currentmodule:: mne.io
 
@@ -316,6 +353,38 @@ Projections:
    read_proj
    write_proj
 
+Manipulate channels and set sensor locations for processing and plotting:
+
+.. currentmodule:: mne.channels
+
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   Layout
+   Montage
+   DigMontage
+
+Functions:
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   fix_mag_coil_types
+   read_montage
+   read_dig_montage
+   read_layout
+   find_layout
+   make_eeg_layout
+   make_grid_layout
+   read_ch_connectivity
+   equalize_channels
+   rename_channels
+   generate_2d_layout
+
 :py:mod:`mne.preprocessing`:
 
 .. automodule:: mne.preprocessing
@@ -334,12 +403,22 @@ Projections:
    create_eog_epochs
    find_ecg_events
    find_eog_events
-   find_outlier_adaptive
    ica_find_ecg_events
    ica_find_eog_events
    read_ica
    run_ica
-   infomax
+
+EEG referencing:
+
+.. currentmodule:: mne.io
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   add_reference_channels
+   set_bipolar_reference
+   set_eeg_reference
 
 :py:mod:`mne.filter`:
 
@@ -395,6 +474,7 @@ Events
    combine_event_ids
    equalize_epoch_counts
    add_channels_epochs
+   concatenate_epochs
 
 Sensor Space Data
 =================
@@ -405,17 +485,17 @@ Sensor Space Data
    :toctree: generated/
    :template: function.rst
 
+   combine_evoked
    concatenate_raws
    equalize_channels
+   grand_average
    get_chpi_positions
    pick_channels
    pick_channels_cov
    pick_channels_forward
    pick_channels_regexp
    pick_types
-   pick_types_evoked
    pick_types_forward
-   read_ch_connectivity
    read_epochs
    read_reject_parameters
    read_selection
@@ -432,10 +512,19 @@ Covariance
    :template: function.rst
 
    compute_covariance
-   compute_raw_data_covariance
+   compute_raw_covariance
+   make_ad_hoc_cov
    read_cov
    write_cov
 
+.. currentmodule:: mne.cov
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   regularize
+
 
 MRI Processing
 ==============
@@ -481,8 +570,12 @@ Functions:
    average_forward_solutions
    convert_forward_solution
    do_forward_solution
+   make_bem_model
+   make_bem_solution
    make_forward_solution
    make_field_map
+   make_sphere_model
+   morph_source_spaces
    read_bem_surfaces
    read_forward_solution
    read_trans
@@ -494,6 +587,16 @@ Functions:
    write_bem_surface
    write_trans
 
+.. currentmodule:: mne.bem
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   make_watershed_bem
+   make_flash_bem
+   convert_flash_mris
+
 .. currentmodule:: mne.forward
 
 .. autosummary::
@@ -503,6 +606,12 @@ Functions:
    restrict_forward_to_label
    restrict_forward_to_stc
 
+:py:mod:`mne.source_space`:
+
+.. automodule:: mne.source_space
+   :no-members:
+   :no-inherited-members:
+
 
 Inverse Solutions
 =================
@@ -532,7 +641,10 @@ Functions:
    apply_inverse
    apply_inverse_epochs
    apply_inverse_raw
+   compute_source_psd
+   compute_source_psd_epochs
    compute_rank_inverse
+   estimate_snr
    make_inverse_operator
    read_inverse_operator
    source_band_induced_power
@@ -575,6 +687,19 @@ Functions:
    dics
    dics_epochs
    dics_source_power
+   rap_music
+
+:py:mod:`mne`:
+
+.. currentmodule:: mne
+
+Functions:
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   fit_dipole
 
 
 Source Space Data
@@ -594,17 +719,16 @@ Source Space Data
    label_sign_flip
    morph_data
    morph_data_precomputed
-   read_annot
-   read_dip
+   read_labels_from_annot
+   read_dipole
    read_label
    read_source_estimate
    save_stc_as_volume
    split_label
    stc_to_label
-   transform_coordinates
    transform_surface_to
    vertex_to_mni
-   write_annot
+   write_labels_to_annot
    write_label
 
 
@@ -619,25 +743,62 @@ Time-Frequency
 
 .. currentmodule:: mne.time_frequency
 
+Classes:
+
+.. autosummary::
+   :toctree: generated/
+   :template: class.rst
+
+   AverageTFR
+
+Functions that operate on mne-python objects:
+
 .. autosummary::
    :toctree: generated/
    :template: function.rst
 
-   ar_raw
-   compute_raw_psd
+   compute_epochs_csd
    compute_epochs_psd
-   iir_filter_raw
-   morlet
+   compute_raw_psd
+   fit_iir_model_raw
    tfr_morlet
+   tfr_multitaper
+   tfr_stockwell
+   read_tfrs
+   write_tfrs
+
+Functions that operate on ``np.ndarray`` objects:
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   cwt_morlet
+   dpss_windows
+   morlet
+   multitaper_psd
    single_trial_power
-   yule_walker
-   ar_raw
-   iir_filter_raw
    stft
    istft
    stftfreq
 
 
+:py:mod:`mne.time_frequency.tfr`:
+
+.. automodule:: mne.time_frequency.tfr
+   :no-members:
+   :no-inherited-members:
+
+.. currentmodule:: mne.time_frequency.tfr
+
+.. autosummary::
+   :toctree: generated/
+   :template: function.rst
+
+   cwt
+   morlet
+
+
 Connectivity Estimation
 =======================
 
@@ -682,6 +843,8 @@ Statistics
    spatio_temporal_cluster_1samp_test
    ttest_1samp_no_p
    linear_regression
+   linear_regression_raw
+   f_mway_rm
 
 Functions to compute connectivity (adjacency) matrices for cluster-level statistics
 
@@ -714,8 +877,10 @@ Simulation
    :toctree: generated/
    :template: function.rst
 
-   generate_evoked
-   generate_sparse_stc
+   simulate_evoked
+   simulate_raw
+   simulate_stc
+   simulate_sparse_stc
    select_source_in_label
 
 Decoding
@@ -738,6 +903,7 @@ Classes:
    PSDEstimator
    FilterEstimator
    CSP
+   GeneralizationAcrossTime
 
 Realtime
 ========
diff --git a/doc/source/manual/reading.rst b/doc/references.rst
similarity index 100%
rename from doc/source/manual/reading.rst
rename to doc/references.rst
diff --git a/doc/source/_images/plot_time_frequency.png b/doc/source/_images/plot_time_frequency.png
deleted file mode 100644
index da7456b..0000000
Binary files a/doc/source/_images/plot_time_frequency.png and /dev/null differ
diff --git a/doc/source/_static/default.css b/doc/source/_static/default.css
deleted file mode 100755
index 4e9adfd..0000000
--- a/doc/source/_static/default.css
+++ /dev/null
@@ -1,515 +0,0 @@
-/**
- * Alternate Sphinx design
- * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
- */
-
-body {
-    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
-    font-size: 14px;
-    letter-spacing: -0.01em;
-    line-height: 150%;
-    text-align: center;
-    /*background-color: #AFC1C4; */
-    /*background-color: -moz-linear-gradient(linear, left top, left bottom, from(#660000), to(#000000));*/
-    background-color: #151515;
-    color: black;
-    padding: 0;
-    border: 1px solid #aaa;
-
-    margin: 0px 80px 0px 80px;
-    min-width: 740px;
-}
-
-a {
-    color: #330033;
-    text-decoration: none;
-}
-
-a:hover {
-    color: #99CCFF;
-}
-
-pre {
-    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.015em;
-    padding: 0.5em;
-    border: 1px solid #ccc;
-    background-color: #f8f8f8;
-}
-
-td.linenos pre {
-    padding: 0.5em 0;
-    border: 0;
-    background-color: #000000;
-    color: #aaa;
-}
-
-table.highlighttable {
-    margin-left: 0.5em;
-}
-
-table.highlighttable td {
-    padding: 0 0.5em 0 0.5em;
-}
-
-cite, code, tt {
-    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.01em;
-}
-
-hr {
-    border: 1px solid #abc;
-    margin: 2em;
-}
-
-tt {
-    background-color: #f2f2f2;
-    border-bottom: 1px solid #ddd;
-    color: #333;
-}
-
-tt.descname {
-    background-color: transparent;
-    font-weight: bold;
-    font-size: 1.2em;
-    border: 0;
-}
-
-tt.descclassname {
-    background-color: transparent;
-    border: 0;
-}
-
-tt.xref {
-    background-color: transparent;
-    font-weight: bold;
-    border: 0;
-}
-
-a tt {
-    background-color: transparent;
-    font-weight: bold;
-    border: 0;
-    color: #CA7900;
-}
-
-a tt:hover {
-    color: #2491CF;
-}
-
-dl {
-    margin-bottom: 15px;
-}
-
-dd p {
-    margin-top: 0px;
-}
-
-dd ul, dd table {
-    margin-bottom: 10px;
-}
-
-dd {
-    margin-top: 3px;
-    margin-bottom: 10px;
-    margin-left: 30px;
-}
-
-.refcount {
-    color: #060;
-}
-
-dt:target,
-.highlight {
-    background-color: #fbe54e;
-}
-
-dl.class, dl.function {
-    border-top: 2px solid #888;
-}
-
-dl.method, dl.attribute {
-    border-top: 1px solid #aaa;
-}
-
-dl.glossary dt {
-    font-weight: bold;
-    font-size: 1.1em;
-}
-
-pre {
-    line-height: 120%;
-}
-
-pre a {
-    color: inherit;
-    text-decoration: underline;
-}
-
-.first {
-    margin-top: 0 !important;
-}
-
-div.document {
-    background-color: white;
-    text-align: left;
-    background-image: url(contents.png);
-    background-repeat: repeat-x;
-}
-
-/*
-div.documentwrapper {
-    width: 100%;
-}
-*/
-
-div.clearer {
-    clear: both;
-}
-
-div.related h3 {
-    display: none;
-}
-
-div.related ul {
-    background-image: url(navigation.png);
-    height: 2em;
-    list-style: none;
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 0;
-    padding-left: 10px;
-}
-
-div.related ul li {
-    margin: 0;
-    padding: 0;
-    height: 2em;
-    float: left;
-}
-
-div.related ul li.right {
-    float: right;
-    margin-right: 5px;
-}
-
-div.related ul li a {
-    margin: 0;
-    padding: 0 5px 0 5px;
-    line-height: 1.75em;
-    color: #FFFFFF;
-}
-
-div.related ul li a:hover {
-    color: #C0C0C0;
-}
-
-div.body {
-    margin: 0;
-    padding: 0.5em 20px 20px 20px;
-}
-
-div.bodywrapper {
-    margin: 0 240px 0 0;
-    border-right: 1px solid #ccc;
-}
-
-div.body a {
-    text-decoration: underline;
-}
-
-div.sphinxsidebar {
-    margin: 0;
-    padding: 0.5em 15px 15px 0;
-    width: 210px;
-    float: right;
-    text-align: left;
-/*    margin-left: -100%; */
-}
-
-div.sphinxsidebar h4, div.sphinxsidebar h3 {
-    margin: 1em 0 0.5em 0;
-    font-size: 0.9em;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    border: 1px solid #86989B;
-    background-color: #C0C0C0;
-}
-
-div.sphinxsidebar ul {
-    padding-left: 1.5em;
-    margin-top: 7px;
-    list-style: none;
-    padding: 0;
-    line-height: 130%;
-}
-
-div.sphinxsidebar ul ul {
-    list-style: square;
-    margin-left: 20px;
-}
-
-p {
-    margin: 0.8em 0 0.5em 0;
-}
-
-p.rubric {
-    font-weight: bold;
-}
-
-h1 {
-    margin: 0;
-    padding: 0.7em 0 0.3em 0;
-    font-size: 1.5em;
-    color: #11557C;
-}
-
-h2 {
-    margin: 1.3em 0 0.2em 0;
-    font-size: 1.35em;
-    padding: 0;
-}
-
-h3 {
-    margin: 1em 0 -0.3em 0;
-    font-size: 1.2em;
-}
-
-h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
-    color: black!important;
-}
-
-h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
-    display: none;
-    margin: 0 0 0 0.3em;
-    padding: 0 0.2em 0 0.2em;
-    color: #aaa!important;
-}
-
-h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
-h5:hover a.anchor, h6:hover a.anchor {
-    display: inline;
-}
-
-h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
-h5 a.anchor:hover, h6 a.anchor:hover {
-    color: #777;
-    background-color: #eee;
-}
-
-table {
-    border-collapse: collapse;
-    margin: 0 -0.5em 0 -0.5em;
-}
-
-table td, table th {
-    padding: 0.2em 0.5em 0.2em 0.5em;
-}
-
-div.footer {
-    background-color: #C0C0C0;
-    color: #000000;
-    padding: 3px 8px 3px 0;
-    clear: both;
-    font-size: 0.8em;
-    text-align: right;
-}
-
-div.footer a {
-    color: #000000;
-    text-decoration: underline;
-}
-
-div.pagination {
-    margin-top: 2em;
-    padding-top: 0.5em;
-    border-top: 1px solid black;
-    text-align: center;
-}
-
-div.sphinxsidebar ul.toc {
-    margin: 1em 0 1em 0;
-    padding: 0 0 0 0.5em;
-    list-style: none;
-}
-
-div.sphinxsidebar ul.toc li {
-    margin: 0.5em 0 0.5em 0;
-    font-size: 0.9em;
-    line-height: 130%;
-}
-
-div.sphinxsidebar ul.toc li p {
-    margin: 0;
-    padding: 0;
-}
-
-div.sphinxsidebar ul.toc ul {
-    margin: 0.2em 0 0.2em 0;
-    padding: 0 0 0 1.8em;
-}
-
-div.sphinxsidebar ul.toc ul li {
-    padding: 0;
-}
-
-div.admonition, div.warning {
-    font-size: 0.9em;
-    margin: 1em 0 0 0;
-    border: 1px solid #86989B;
-    background-color: #f7f7f7;
-}
-
-div.admonition p, div.warning p {
-    margin: 0.5em 1em 0.5em 1em;
-    padding: 0;
-}
-
-div.admonition pre, div.warning pre {
-    margin: 0.4em 1em 0.4em 1em;
-}
-
-div.admonition p.admonition-title,
-div.warning p.admonition-title {
-    margin: 0;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    border-bottom: 1px solid #86989B;
-    font-weight: bold;
-    background-color: #AFC1C4;
-}
-
-div.warning {
-    border: 1px solid #000000;
-}
-
-div.warning p.admonition-title {
-    background-color: #000000;
-    border-bottom-color: #940000;
-}
-
-div.admonition ul, div.admonition ol,
-div.warning ul, div.warning ol {
-    margin: 0.1em 0.5em 0.5em 3em;
-    padding: 0;
-}
-
-div.versioninfo {
-    margin: 1em 0 0 0;
-    border: 1px solid #ccc;
-    background-color: #DDEAF0;
-    padding: 8px;
-    line-height: 1.3em;
-    font-size: 0.9em;
-}
-
-
-a.headerlink {
-    color: #c60f0f!important;
-    font-size: 1em;
-    margin-left: 6px;
-    padding: 0 4px 0 4px;
-    text-decoration: none!important;
-    visibility: hidden;
-}
-
-h1:hover > a.headerlink,
-h2:hover > a.headerlink,
-h3:hover > a.headerlink,
-h4:hover > a.headerlink,
-h5:hover > a.headerlink,
-h6:hover > a.headerlink,
-dt:hover > a.headerlink {
-    visibility: visible;
-}
-
-a.headerlink:hover {
-    background-color: #ccc;
-    color: white!important;
-}
-
-table.indextable td {
-    text-align: left;
-    vertical-align: top;
-}
-
-table.indextable dl, table.indextable dd {
-    margin-top: 0;
-    margin-bottom: 0;
-}
-
-table.indextable tr.pcap {
-    height: 10px;
-}
-
-table.indextable tr.cap {
-    margin-top: 10px;
-    background-color: #f2f2f2;
-}
-
-img.toggler {
-    margin-right: 3px;
-    margin-top: 3px;
-    cursor: pointer;
-}
-
-img.inheritance {
-    border: 0px
-}
-
-form.pfform {
-    margin: 10px 0 20px 0;
-}
-
-table.contentstable {
-    width: 90%;
-}
-
-table.contentstable p.biglink {
-    line-height: 150%;
-}
-
-a.biglink {
-    font-size: 1.3em;
-}
-
-span.linkdescr {
-    font-style: italic;
-    padding-top: 5px;
-    font-size: 90%;
-}
-
-ul.search {
-    margin: 10px 0 0 20px;
-    padding: 0;
-}
-
-ul.search li {
-    padding: 5px 0 5px 20px;
-    background-image: url(file.png);
-    background-repeat: no-repeat;
-    background-position: 0 7px;
-}
-
-ul.search li a {
-    font-weight: bold;
-}
-
-ul.search li div.context {
-    color: #888;
-    margin: 2px 0 0 30px;
-    text-align: left;
-}
-
-ul.keywordmatches li.goodmatch a {
-    font-weight: bold;
-}
-
-div.social-button {
-    float: left;
-    width: 120px;
-    height: 28px;
-}
-
diff --git a/doc/source/_static/institutions.png b/doc/source/_static/institutions.png
deleted file mode 100644
index c2fb35f..0000000
Binary files a/doc/source/_static/institutions.png and /dev/null differ
diff --git a/doc/source/_static/mne_logo.png b/doc/source/_static/mne_logo.png
deleted file mode 100644
index 0577fd9..0000000
Binary files a/doc/source/_static/mne_logo.png and /dev/null differ
diff --git a/doc/source/_static/navy.css b/doc/source/_static/navy.css
deleted file mode 100755
index 04912f9..0000000
--- a/doc/source/_static/navy.css
+++ /dev/null
@@ -1,515 +0,0 @@
-/**
- * Alternate Sphinx design
- * Originally created by Armin Ronacher for Werkzeug, adapted by Georg Brandl.
- */
-
-body {
-    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
-    font-size: 14px;
-    letter-spacing: -0.01em;
-    line-height: 150%;
-    text-align: center;
-    /*background-color: #AFC1C4; */
-    /*background-color: -moz-linear-gradient(linear, left top, left bottom, from(#660000), to(#000000));*/
-/*    background-color: #151515;*/
-    background-color: #11557C;
-    color: black;
-    padding: 0;
-    border: 1px solid #aaa;
-
-    margin: 0px 80px 0px 80px;
-    min-width: 740px;
-}
-
-a {
-    color: #330033;
-    text-decoration: none;
-}
-
-a:hover {
-    color: #99CCFF;
-}
-
-pre {
-    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.015em;
-    padding: 0.5em;
-    border: 1px solid #ccc;
-    background-color: #f8f8f8;
-}
-
-td.linenos pre {
-    padding: 0.5em 0;
-    border: 0;
-    background-color: #000000;
-    color: #aaa;
-}
-
-table.highlighttable {
-    margin-left: 0.5em;
-}
-
-table.highlighttable td {
-    padding: 0 0.5em 0 0.5em;
-}
-
-cite, code, tt {
-    font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.01em;
-}
-
-hr {
-    border: 1px solid #abc;
-    margin: 2em;
-}
-
-tt {
-    background-color: #f2f2f2;
-    border-bottom: 1px solid #ddd;
-    color: #333;
-}
-
-tt.descname {
-    background-color: transparent;
-    font-weight: bold;
-    font-size: 1.2em;
-    border: 0;
-}
-
-tt.descclassname {
-    background-color: transparent;
-    border: 0;
-}
-
-tt.xref {
-    background-color: transparent;
-    font-weight: bold;
-    border: 0;
-}
-
-a tt {
-    background-color: transparent;
-    font-weight: bold;
-    border: 0;
-    color: #CA7900;
-}
-
-a tt:hover {
-    color: #2491CF;
-}
-
-dl {
-    margin-bottom: 15px;
-}
-
-dd p {
-    margin-top: 0px;
-}
-
-dd ul, dd table {
-    margin-bottom: 10px;
-}
-
-dd {
-    margin-top: 3px;
-    margin-bottom: 10px;
-    margin-left: 30px;
-}
-
-.refcount {
-    color: #060;
-}
-
-dt:target,
-.highlight {
-    background-color: #fbe54e;
-}
-
-dl.class, dl.function {
-    border-top: 2px solid #888;
-}
-
-dl.method, dl.attribute {
-    border-top: 1px solid #aaa;
-}
-
-dl.glossary dt {
-    font-weight: bold;
-    font-size: 1.1em;
-}
-
-pre {
-    line-height: 120%;
-}
-
-pre a {
-    color: inherit;
-    text-decoration: underline;
-}
-
-.first {
-    margin-top: 0 !important;
-}
-
-div.document {
-    background-color: white;
-    text-align: left;
-    background-image: url(contents.png);
-    background-repeat: repeat-x;
-}
-
-/*
-div.documentwrapper {
-    width: 100%;
-}
-*/
-
-div.clearer {
-    clear: both;
-}
-
-div.related h3 {
-    display: none;
-}
-
-div.related ul {
-    background-image: url(navigation.png);
-    height: 2em;
-    list-style: none;
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 0;
-    padding-left: 10px;
-}
-
-div.related ul li {
-    margin: 0;
-    padding: 0;
-    height: 2em;
-    float: left;
-}
-
-div.related ul li.right {
-    float: right;
-    margin-right: 5px;
-}
-
-div.related ul li a {
-    margin: 0;
-    padding: 0 5px 0 5px;
-    line-height: 1.75em;
-    color: #330033;
-}
-
-div.related ul li a:hover {
-    color: #C0C0C0;
-}
-
-div.body {
-    margin: 0;
-    padding: 0.5em 20px 20px 20px;
-}
-
-div.bodywrapper {
-    margin: 0 240px 0 0;
-    border-right: 1px solid #ccc;
-}
-
-div.body a {
-    text-decoration: underline;
-}
-
-div.sphinxsidebar {
-    margin: 0;
-    padding: 0.5em 15px 15px 0;
-    width: 210px;
-    float: right;
-    text-align: left;
-/*    margin-left: -100%; */
-}
-
-div.sphinxsidebar h4, div.sphinxsidebar h3 {
-    margin: 1em 0 0.5em 0;
-    font-size: 0.9em;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    border: 1px solid #86989B;
-    background-color: #C0C0C0;
-}
-
-div.sphinxsidebar ul {
-    padding-left: 1.5em;
-    margin-top: 7px;
-    list-style: none;
-    padding: 0;
-    line-height: 130%;
-}
-
-div.sphinxsidebar ul ul {
-    list-style: square;
-    margin-left: 20px;
-}
-
-p {
-    margin: 0.8em 0 0.5em 0;
-}
-
-p.rubric {
-    font-weight: bold;
-}
-
-h1 {
-    margin: 0;
-    padding: 0.7em 0 0.3em 0;
-    font-size: 1.5em;
-    color: #11557C;
-}
-
-h2 {
-    margin: 1.3em 0 0.2em 0;
-    font-size: 1.35em;
-    padding: 0;
-}
-
-h3 {
-    margin: 1em 0 -0.3em 0;
-    font-size: 1.2em;
-}
-
-h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
-    color: black!important;
-}
-
-h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
-    display: none;
-    margin: 0 0 0 0.3em;
-    padding: 0 0.2em 0 0.2em;
-    color: #aaa!important;
-}
-
-h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
-h5:hover a.anchor, h6:hover a.anchor {
-    display: inline;
-}
-
-h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
-h5 a.anchor:hover, h6 a.anchor:hover {
-    color: #777;
-    background-color: #eee;
-}
-
-table {
-    border-collapse: collapse;
-    margin: 0 -0.5em 0 -0.5em;
-}
-
-table td, table th {
-    padding: 0.2em 0.5em 0.2em 0.5em;
-}
-
-div.footer {
-    background-color: #C0C0C0;
-    color: #000000;
-    padding: 3px 8px 3px 0;
-    clear: both;
-    font-size: 0.8em;
-    text-align: right;
-}
-
-div.footer a {
-    color: #000000;
-    text-decoration: underline;
-}
-
-div.pagination {
-    margin-top: 2em;
-    padding-top: 0.5em;
-    border-top: 1px solid black;
-    text-align: center;
-}
-
-div.sphinxsidebar ul.toc {
-    margin: 1em 0 1em 0;
-    padding: 0 0 0 0.5em;
-    list-style: none;
-}
-
-div.sphinxsidebar ul.toc li {
-    margin: 0.5em 0 0.5em 0;
-    font-size: 0.9em;
-    line-height: 130%;
-}
-
-div.sphinxsidebar ul.toc li p {
-    margin: 0;
-    padding: 0;
-}
-
-div.sphinxsidebar ul.toc ul {
-    margin: 0.2em 0 0.2em 0;
-    padding: 0 0 0 1.8em;
-}
-
-div.sphinxsidebar ul.toc ul li {
-    padding: 0;
-}
-
-div.admonition, div.warning {
-    font-size: 0.9em;
-    margin: 1em 0 0 0;
-    border: 1px solid #86989B;
-    background-color: #f7f7f7;
-}
-
-div.admonition p, div.warning p {
-    margin: 0.5em 1em 0.5em 1em;
-    padding: 0;
-}
-
-div.admonition pre, div.warning pre {
-    margin: 0.4em 1em 0.4em 1em;
-}
-
-div.admonition p.admonition-title,
-div.warning p.admonition-title {
-    margin: 0;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    border-bottom: 1px solid #86989B;
-    font-weight: bold;
-    background-color: #05804A;
-}
-
-div.warning {
-    border: 1px solid #000000;
-}
-
-div.warning p.admonition-title {
-    background-color: #940000;
-    border-bottom-color: #940000;
-}
-
-div.admonition ul, div.admonition ol,
-div.warning ul, div.warning ol {
-    margin: 0.1em 0.5em 0.5em 3em;
-    padding: 0;
-}
-
-div.versioninfo {
-    margin: 1em 0 0 0;
-    border: 1px solid #ccc;
-    background-color: #DDEAF0;
-    padding: 8px;
-    line-height: 1.3em;
-    font-size: 0.9em;
-}
-
-
-a.headerlink {
-    color: #c60f0f!important;
-    font-size: 1em;
-    margin-left: 6px;
-    padding: 0 4px 0 4px;
-    text-decoration: none!important;
-    visibility: hidden;
-}
-
-h1:hover > a.headerlink,
-h2:hover > a.headerlink,
-h3:hover > a.headerlink,
-h4:hover > a.headerlink,
-h5:hover > a.headerlink,
-h6:hover > a.headerlink,
-dt:hover > a.headerlink {
-    visibility: visible;
-}
-
-a.headerlink:hover {
-    background-color: #ccc;
-    color: white!important;
-}
-
-table.indextable td {
-    text-align: left;
-    vertical-align: top;
-}
-
-table.indextable dl, table.indextable dd {
-    margin-top: 0;
-    margin-bottom: 0;
-}
-
-table.indextable tr.pcap {
-    height: 10px;
-}
-
-table.indextable tr.cap {
-    margin-top: 10px;
-    background-color: #f2f2f2;
-}
-
-img.toggler {
-    margin-right: 3px;
-    margin-top: 3px;
-    cursor: pointer;
-}
-
-img.inheritance {
-    border: 0px
-}
-
-form.pfform {
-    margin: 10px 0 20px 0;
-}
-
-table.contentstable {
-    width: 90%;
-}
-
-table.contentstable p.biglink {
-    line-height: 150%;
-}
-
-a.biglink {
-    font-size: 1.3em;
-}
-
-span.linkdescr {
-    font-style: italic;
-    padding-top: 5px;
-    font-size: 90%;
-}
-
-ul.search {
-    margin: 10px 0 0 20px;
-    padding: 0;
-}
-
-ul.search li {
-    padding: 5px 0 5px 20px;
-    background-image: url(file.png);
-    background-repeat: no-repeat;
-    background-position: 0 7px;
-}
-
-ul.search li a {
-    font-weight: bold;
-}
-
-ul.search li div.context {
-    color: #888;
-    margin: 2px 0 0 30px;
-    text-align: left;
-}
-
-ul.keywordmatches li.goodmatch a {
-    font-weight: bold;
-}
-
-div.social-button {
-    float: left;
-    width: 120px;
-    height: 28px;
-}
diff --git a/doc/source/_templates/sidebar.html b/doc/source/_templates/sidebar.html
deleted file mode 100644
index 85d65f8..0000000
--- a/doc/source/_templates/sidebar.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<h3>Versions</h3>
-<ul class="current">
-    <li class="toctree-l1"><a href=http://martinos.org/mne/stable>Stable</a></li>
-    <li class="toctree-l1"><a href=http://martinos.org/mne/dev>Development</a></li>
-</ul>
\ No newline at end of file
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
deleted file mode 100644
index 43e0ee3..0000000
--- a/doc/source/getting_started.rst
+++ /dev/null
@@ -1,205 +0,0 @@
-.. _getting_started:
-
-Getting Started
-===============
-
-This page will help you get started with MNE-python. If you are new to Python here is a
-very good place to get started: http://scipy-lectures.github.com. If you are at the Martinos 
-Center, please see this section :ref:`inside_martinos`. If you would like to use a custom
-installation of python (or have specific questions about integrating special tools like 
-IPython notebooks), please see this section :ref:`detailed_notes`.
-
-Outside the Martinos Center
----------------------------
-
-For a fast and up to date scientific Python environment that resolves all
-dependencies you can install Enthought Canopy available at:
-
-https://www.enthought.com/products/canopy/
-
-Canopy is free for academic purposes. If you cannot benefit from the
-an academic license and you don't want to pay for it, you can
-use Canopy express which is a lightweight version (no 3D visualization
-support for example): https://www.enthought.com/store/.
-
-To test that everything works properly, open up IPython::
-
-    ipython --pylab qt
-
-Now that you have a working Python environment you can install MNE.
-
-The first decision you must make is whether you want the most recent stable version or the 
-development version (this contains new features, however the function names and usage examples
-may not be fully settled).
-
-Stable Version Instructions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-You can also install the latest stable version with with pip::
-
-    pip install mne --upgrade
-    
-Now that you have installed mne, check and optimize the installation (:ref:`check_and_optimize`)
-
-Development Version Instructions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you know you would like to contribute to the project please follow the instructions here: 
-:ref:`using-git`
-
-If you just want to start using the latest development version (the most up to date)::
-
-    pip install -e git+https://github.com/mne-tools/mne-python#egg=mne-dev
-
-.. _check_and_optimize:
-
-Check and Optimize Installation
--------------------------------
-
-To check that everything went fine, in ipython, type::
-
-    >>> import mne
-
-If you get a new prompt with no error messages, you should be good to go.
-
-CUDA Optimization
-^^^^^^^^^^^^^^^^^
-
-If you want to use NVIDIA CUDA for filtering (can yield 3-4x speedups), you'll
-need to install the NVIDIA toolkit on your system, and then both pycuda and
-scikits.cuda, see:
-
-https://developer.nvidia.com/cuda-downloads
-
-http://mathema.tician.de/software/pycuda
-
-http://wiki.tiker.net/PyCuda/Installation/
-
-https://github.com/lebedov/scikits.cuda
-
-To initialize mne-python cuda support, after installing these dependencies
-and running their associated unit tests (to ensure your installation is correct)
-you can run:
-
-    >>> mne.cuda.init_cuda() # doctest: +SKIP
-
-If you have everything installed correctly, you should see an INFO-level log
-message telling you your CUDA hardware's available memory. To have CUDA
-initialized on startup, you can do:
-
-    >>> mne.utils.set_config('MNE_USE_CUDA', 'true') # doctest: +SKIP
-
-You can test if MNE CUDA support is working by running the associated test:
-
-    nosetests mne/tests/test_filter.py
-
-If all tests pass with none skipped, then mne-python CUDA support works.
-
-
-.. _detailed_notes:
-
-Detailed Notes
---------------
-
-MNE is written in pure Python making it easy to setup on
-any machine with Python >=2.6, NumPy >= 1.6, SciPy >= 0.7.2
-and matplotlib >= 1.1.0.
-
-Some isolated functions (e.g. filtering with firwin2) require SciPy >= 0.9.
-
-To run all documentation examples the following additional packages are required:
-
-    * PySurfer (for visualization of source estimates on cortical surfaces)
-
-    * scikit-learn (for supervised and unsupervised machine learning functionality)
-
-    * pandas >= 0.8 (for export to tabular data structures like excel files)
-
-Note. For optimal performance we recommend installing recent versions of
-NumPy (> 1.7), SciPy (> 0.10) and scikit-learn (>= 0.14).
-
-Development Environment
-^^^^^^^^^^^^^^^^^^^^^^^
-
-Note that we explicitly support the following Python setups since they reflect our
-development environments and functionality is best tested for them:
-
-    * EPD 7.3 (Mac, Linux)
-
-    * Canopy >= 1.0 (Mac, Linux)
-
-    * Anaconda (Mac)
-
-    * Debian / Ubuntu standard system Python + Scipy stack
-
-Anaconda
-^^^^^^^^
-
-Note for developers. To make Anaconda working with our test-suite a few
-manual adjustments might be necessary. This may require
-manually adjusting the python interpreter invoked by the nosetests and
-the sphinx-build 'binaries' (http://goo.gl/Atqh26).
-Tested on a recent MacBook Pro running Mac OS X 10.8 and Mac OS X 10.9
-
-multi-threading
-^^^^^^^^^^^^^^^
-
-For optimal performance we recommend using numpy / scipy with the multi-threaded
-ATLAS, gotoblas2, or intel MKL. For example, the Enthought Canopy and the Anaconda distributions
-ship with tested MKL-compiled numpy / scipy versions. Depending on the use case and your system
-this may speed up operations by a factor greater than 10.
-
-pylab
-^^^^^
-
-Although all of the examples in this documentation are in the style
-of the standard Python interpreter, the use of IPython with the pylab option
-is highly recommended. In addition, for the setups listed above we would
-strongly recommend to use the QT matplotlib backend for fast and correct rendering::
-
-    ipython --pylab qt
-
-
-On Linux, for example, QT is the only matplotlib backend for which 3D rendering
-will work correctly. On Mac OS X for other backends certain matplotlib functions
-might not work as expected.
-
-IPython notebooks
-^^^^^^^^^^^^^^^^^
-
-To take full advantage of MNE-Python's visualization capacities in combination
-with IPython notebooks and inline displaying, please explicitly add the
-following magic method invocation to your notebook or configure your notebook
-runtime accordingly.
-
-    %pylab inline
-
-If you use another Python setup and you encounter some difficulties please
-report them on the MNE mailing list or on github to get assistance.
-
-
-.. _inside_martinos:
-
-Inside the Martinos Center
---------------------------
-
-For people within the MGH/MIT/HMS Martinos Center mne is available on the network.
-
-In a terminal do::
-
-    setenv PATH /usr/pubsw/packages/python/epd/bin:${PATH}
-
-If you use Bash replace the previous instruction with::
-
-    export PATH=/usr/pubsw/packages/python/epd/bin:${PATH}
-
-Then start the python interpreter with:
-
-    ipython
-
-Then type::
-
-    >>> import mne
-
-If you get a new prompt with no error messages, you should be good to go.
-Start with the :ref:`examples-index`.
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 47a9194..0000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-========
-MNE Home
-========
-
-MNE is a software package for processing magnetoencephalography
-(MEG) and electroencephalography (EEG) data. 
-
-The MNE software computes cortically-constrained L2 minimum-norm
-current estimates and associated dynamic statistical parametric maps
-from MEG and EEG data, optionally constrained by fMRI. 
-
-This software includes MEG and EEG preprocessing tools, interactive
-and batch-mode modules for the forward and inverse calculations, as
-well as various data conditioning and data conversion utilities. These
-tools are provided as compiled C code for the LINUX and Mac OSX
-operating systems.
-
-In addition to the compiled C code tools, MNE Software includes a
-Matlab toolbox which facilitates access to the fif (functional image
-file) format data files employed in our software and enables
-development of custom analysis tools based on the intermediate results
-computed with the MNE tools. 
-
-The third and newest component of MNE is MNE-Python which implements
-all the functionality of the MNE Matlab tools in Python and extends
-the capabilities of the MNE Matlab tools to, e.g., frequency-domain
-and time-frequency analyses and non-parametric statistics. This
-component of MNE is presently evolving quickly and thanks to the
-adopted open development environment user contributions can be easily
-incorporated.
-
-The MNE development is supported by National Institute of Biomedical Imaging and Bioengineering 
-grants 5R01EB009048 and P41EB015896 (Center for Functional Neuroimaging Technologies) as well as 
-NSF awards 0958669 and 1042134.
-
-The Matlab and Python components of MNE are provided under the
-simplified BSD license.
-
-
-
-
-  * `Download <http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/index.php>`_ MNE
-  * Read the :ref:`manual`.
-  * Get started with :ref:`mne_python`
-  * :ref:`command_line_tutorial`
-  * Join the MNE `mailing list <http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis>`_
-  * `Help/Feature Request/Bug Report <mailto:mne_support at nmr.mgh.harvard.edu>`_
-  * :ref:`ch_reading`
-
-.. toctree::
-   :maxdepth: 2
-
-   manual
-   mne-python
-   cite
-
diff --git a/doc/source/manual.rst b/doc/source/manual.rst
deleted file mode 100644
index 9c39ce1..0000000
--- a/doc/source/manual.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-.. _manual:
-
-Manual
-======
-
-.. toctree::
-   :maxdepth: 1
-
-   manual/intro
-   manual/list
-   manual/cookbook
-   manual/browse
-   manual/forward
-   manual/mne
-   manual/analyze
-   manual/morph
-   manual/convert
-   manual/matlab
-   manual/utilities
-   manual/sampledata
-   manual/reading
-   manual/AppA
-   manual/AppB
-   manual/AppInstall
-   manual/AppReleaseNotes
-   manual/AppEULA
-   command_line_tutorial
-
diff --git a/doc/source/manual/AppB.rst b/doc/source/manual/AppB.rst
deleted file mode 100644
index c89aa88..0000000
--- a/doc/source/manual/AppB.rst
+++ /dev/null
@@ -1,294 +0,0 @@
-
-
-.. _setup_martinos:
-
-============================
-Setup at the Martinos Center
-============================
-
-This Appendix contains information specific to the Martinos
-Center setup.
-
-.. _user_environment_martinos:
-
-User environment
-################
-
-In the Martinos Center computer network, the 2.7 version
-of MNE is located at /usr/pubsw/packages/mne/stable. To use this
-version, follow :ref:`user_environment` substituting /usr/pubsw/packages/mne/stable
-for <*MNE*> and /usr/pubsw/packages/matlab/current
-for <*Matlab*> . For most users,
-the default shell is tcsh.
-
-.. note:: A new version of MNE is build every night from    the latest sources. This version is located at /usr/pubsw/packages/mne/nightly.
-
-.. _BABGFDJG:
-
-Using Neuromag software
-#######################
-
-Software overview
-=================
-
-The complete set of Neuromag software is available on the
-LINUX workstations. The programs can be accessed from the command
-line, see :ref:`BABFIEHC`. The corresponding manuals, located
-at ``$NEUROMAG_ROOT/manuals`` are listed in :ref:`BABCJJGF`.
-
-.. _BABFIEHC:
-
-.. table:: Principal Neuromag software modules.
-
-    ===========  =================================
-    Module       Description
-    ===========  =================================
-    xfit         Source modelling
-    xplotter     Data plotting
-    graph        General purpose data processor
-    mrilab       MEG-MRI integration
-    seglab       MRI segmentation
-    cliplab      Graphics clipboard
-    ===========  =================================
-
-.. _BABCJJGF:
-
-.. table:: List of Neuromag software manuals.
-
-    ===========  =========================================
-    Module       pdf
-    ===========  =========================================
-    xfit         XFit.pdf
-    xplotter     Xplotter.pdf
-    graph        GraphUsersGuide.pdf GraphReference.pdf
-    mrilab       Mrilab.pdf
-    seglab       Seglab.pdf
-    cliplab      Cliplab.pdf
-    ===========  =========================================
-
-To access the Neuromag software on the LINUX workstations
-in the Martinos Center, say (in tcsh or csh)
-
-``source /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_csh``
-
-or in POSIX shell
-
-``. /space/orsay/8/megdev/Neuromag-LINUX/neuromag_setup_sh``
-
-Using MRIlab for coordinate system alignment
-============================================
-
-The MEG-MRI coordinate system alignment can be also accomplished with
-the Neuromag tool MRIlab, part of the standard software on Neuromag
-MEG systems.
-
-In MRIlab, the following steps are necessary for the coordinate
-system alignment:
-
-- Load the MRI description file ``COR.fif`` from ``subjects/sample/mri/T1-neuromag/sets`` through File/Open .
-
-- Open the landmark setting dialog from Windows/Landmarks .
-
-- Click on one of the coordinate setting fields on the Nasion line.
-  Click Goto . Select the crosshair
-  tool and move the crosshair to the nasion. Click Get .
-
-- Proceed similarly for the left and right auricular points.
-  Your instructor will help you with the selection of the correct
-  points.
-
-- Click OK to set the alignment
-
-- Load the digitization data from the file ``sample_audvis_raw.fif`` or ``sample_audvis-ave.fif`` (the
-  on-line evoked-response average file) in ``MEG/sample`` through File/Import/Isotrak data . Click Make points to
-  show all the digitization data on the MRI slices.
-
-- Check that the alignment is correct by looking at the locations
-  of the digitized points are reasonable. Adjust the landmark locations
-  using the Landmarks dialog, if
-  necessary.
-
-- Save the aligned file to the file suggested in the dialog
-  coming up from File/Save .
-
-Mature software
-###############
-
-This Section contains documentation for software components,
-which are still available in the MNE software but have been replaced
-by new programs.
-
-.. _BABDABHI:
-
-mne_compute_mne
-===============
-
-This chapter contains information about the options accepted
-by the program mne_compute_mne ,
-which is gradually becoming obsolete. All of its functions will
-be eventually included to mne_make_movie ,
-see :ref:`CBBECEDE`. At this time, mne_compute_mne is
-still needed to produce time-collapsed w files unless you are willing
-to write a Matlab script of your own for this purpose.
-
-mne_compute_mne accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---inv <*name*>**
-
-    Load the inverse operator decomposition from here.
-
-**\---meas <*name*>**
-
-    Load the MEG or EEG data from this file.
-
-**\---set <*number*>**
-
-    The data set (condition) number to load. The list of data sets can
-    be seen, *e.g.*, in mne_analyze , mne_browse_raw ,
-    and xplotter .
-
-**\---bmin <*time/ms*>**
-
-    Specifies the starting time of the baseline. In order to activate
-    baseline correction, both ``--bmin`` and ``--bmax`` options
-    must be present.
-
-**\---bmax <*time/ms*>**
-
-    Specifies the finishing time of the baseline.
-
-**\---nave <*value*>**
-
-    Specifies the number of averaged epochs in the input data. If the input
-    data file is one produced by mne_process_raw or mne_browse_raw ,
-    the number of averages is correct in the file. However, if subtractions
-    or some more complicated combinations of simple averages are produced, *e.g.*,
-    by using the xplotter software, the
-    number of averages should be manually adjusted. This is accomplished
-    either by employing this flag or by adjusting the number of averages
-    in the data file with help of mne_change_nave .
-
-**\---snr <*value*>**
-
-    An estimate for the amplitude SNR. The regularization parameter will
-    be set as :math:`\lambda = ^1/_{\text{SNR}}`. If the SNR option is
-    absent, the regularization parameter will be estimated from the
-    data. The regularization parameter will be then time dependent.
-
-**\---snronly**
-
-    Only estimate SNR and output the result into a file called SNR. Each
-    line of the file contains three values: the time point in ms, the estimated
-    SNR + 1, and the regularization parameter estimated from the data
-    at this time point.
-
-**\---abs**
-
-    Calculate the absolute value of the current and the dSPM for fixed-orientation
-    data.
-
-**\---spm**
-
-    Calculate the dSPM instead of the expected current value.
-
-**\---chi2**
-
-    Calculate an approximate :math:`\chi_2^3` statistic
-    instead of the *F* statistic. This is simply
-    accomplished by multiplying the *F* statistic
-    by three.
-
-**\---sqrtF**
-
-    Take the square root of the :math:`\chi_2^3` or *F* statistic
-    before outputting the stc file.
-
-**\---collapse**
-
-    Make all frames in the stc file (or the wfile) identical. The value
-    at each source location is the maximum value of the output quantity
-    at this location over the analysis period. This option is convenient
-    for determining the correct thresholds for the rendering of the
-    final brain-activity movies.
-
-**\---collapse1**
-
-    Make all frames in the stc file (or the wfile) identical. The value
-    at each source location is the :math:`L_1` norm
-    of the output quantity at this location over the analysis period.
-
-**\---collapse2**
-
-    Make all frames in the stc file (or the wfile) identical. The value
-    at each source location is the :math:`L_2` norm
-    of the output quantity at this location over the analysis period.
-
-**\---SIcurrents**
-
-    Output true current values in SI units (Am). By default, the currents are
-    scaled so that the maximum current value is set to 50 (Am).
-
-**\---out <*name*>**
-
-    Specifies the output file name. This is the 'stem' of
-    the output file name. The actual name is derived by removing anything up
-    to and including the last period from the end of <*name*> .
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. Finally, ``.stc`` or ``.w`` is added,
-    depending on the output file type.
-
-**\---wfiles**
-
-    Use binary w-files in the output whenever possible. The noise-normalization
-    factors can be always output in this format.  The current estimates
-    and dSPMs can be output as wfiles if one of the collapse options
-    is selected.
-
-**\---pred <*name*>**
-
-    Save the predicted data into this file. This is a fif file containing
-    the predicted data waveforms, see :ref:`CHDCACDC`.
-
-**\---outputnorm <*name*>**
-
-    Output noise-normalization factors to this file.
-
-**\---invnorm**
-
-    Output inverse noise-normalization factors to the file defined by
-    the ``--outputnorm`` option.
-
-**\---dip <*name*>**
-
-    Specifies a dipole distribution snapshot file. This is a file containing the
-    current distribution at a time specified with the ``--diptime`` option.
-    The file format is the ASCII dip file format produced by the Neuromag
-    source modelling software (xfit). Therefore, the file can be loaded
-    to the Neuromag MRIlab MRI viewer to display the actual current
-    distribution. This option is only effective if the ``--spm`` option
-    is absent.
-
-**\---diptime <*time/ms*>**
-
-    Time for the dipole snapshot, see ``--dip`` option above.
-
-**\---label <*name*>**
-
-    Label to process. The label files are produced by tksurfer and specify
-    regions of interests (ROIs). A label file name should end with ``-lh.label`` for
-    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
-    ones. The corresponding output files are tagged with ``-lh-`` <*data type* ``.amp`` and ``-rh-`` <*data type* ``.amp`` , respectively. <*data type*> equals ``MNE`` for expected current
-    data and ``spm`` for dSPM data. Each line of the output
-    file contains the waveform of the output quantity at one of the
-    source locations falling inside the ROI.
-
-.. note:: The ``--tmin`` and ``--tmax`` options    which existed in previous versions of mne_compute_mne have    been removed. mne_compute_mne can now    process only the entire averaged epoch.
diff --git a/doc/source/manual/AppInstall.rst b/doc/source/manual/AppInstall.rst
deleted file mode 100644
index ec35def..0000000
--- a/doc/source/manual/AppInstall.rst
+++ /dev/null
@@ -1,174 +0,0 @@
-
-
-.. _install_config:
-
-==============================
-Installation and configuration
-==============================
-
-System requirements
-###################
-
-The MNE software runs on Mac OSX and LINUX operating systems.
-The hardware and software requirements are:
-
-- Mac OSX version 10.5 (Leopard) or later.
-
-- LINUX kernel 2.6.9 or later
-
-- On both LINUX and Mac OSX 32-bit and 64-bit Intel platforms
-  are supported. PowerPC version on Mac OSX can be provided upon request.
-
-- At least 2 GB of memory, 4 GB or more recommended.
-
-- Disk space required for the MNE software: 80 MB
-
-- Additional open source software on Mac OSX, see :ref:`BABDBCJE`.
-
-Installation
-############
-
-The MNE software is distributed as a compressed tar archive
-(Mac OSX and LINUX) or a Mac OSX disk image (dmg).
-
-Download the software
-=====================
-
-Download the software package of interest. The file names
-follow the convention:
-
-MNE-* <*version*>*- <*rev*> -* <*Operating
-system*>*-* <*Processor*>*.* <*ext*>*
-
-The present version number is 2.7.0. The <*rev*> field
-is the SVN revision number at the time this package was created.
-The <*Operating system*> field
-is either Linux or MacOSX. The <*processor*> field
-is either i386 or x86_64. The <*ext*> field
-is 'gz' for compressed tar archive files and 'dmg' for
-Mac OSX disk images.
-
-Installing from a compressed tar archive
-========================================
-
-Go to the directory where you want the software to be installed:
-
-``cd`` <*dir*>
-
-Unpack the tar archive:
-
-``tar zxvf`` <*software package*>
-
-The name of the software directory under <*dir*> will
-be the same as the package file without the .gz extension.
-
-Installing from a Mac OSX disk  image
-=====================================
-
-- Double click on the disk image file.
-  A window opens with the installer package ( <*name*> .pkg)
-  inside.
-
-- Double click the the package file. The installer starts.
-
-- Follow the instructions in the installer.
-
-.. note:: The software will be installed to /Applications/ <*name*> by    default. If you want another location, select Choose Folder... on the Select a Destination screen    in the installer.
-
-.. note:: To provide centralized support in an environment    with
-
-.. _BABDBCJE:
-
-Additional software
-===================
-
-MNE uses the 'Netpbm' package (http://netpbm.sourceforge.net/)
-to create image files in formats other than tif and rgb from mne_analyze and mne_browse_raw .
-This package is usually present on LINUX systems. On Mac OSX, you
-need to install the netpbm package. The recommended way to do this
-is to use the MacPorts Project tools, see http://www.macports.org/:
-
-- If you have not installed the MacPorts
-  software, goto http://www.macports.org/install.php and follow the
-  instructions to install MacPorts.
-
-- Install the netpbm package by saying: ``sudo port install netpbm``
-
-MacPorts requires that you have the XCode developer tools
-and X11 windowing environment installed. X11 is also needed by MNE.
-For Mac OSX Leopard, we recommend using XQuartz (http://xquartz.macosforge.org/).
-As of this writing, XQuartz does not yet exist for SnowLeopard;
-the X11 included with the operating system is sufficient.
-
-.. _CIHIIBDA:
-
-Testing the performance of your OpenGL graphics
-===============================================
-
-The graphics performance of mne_analyze depends
-on your graphics software and hardware configuration. You get the
-best performance if you are using mne_analyze locally
-on a computer and the hardware acceleration capabilities are in
-use. You can check the On GLX... item
-in the help menu of mne_analyze to
-see whether the hardware acceleration is in effect. If the dialog
-popping up says Direct rendering context,
-you are using hardware acceleration. If this dialog indicates
-Nondirect rendering context, you are either using software
-emulation locally, rendering to a remote display, or employing a VNC
-connection. If you are rendering to a local display and get an indication
-of Nondirect rendering context,
-software emulation is in effect and you should contact your local
-computer support to enable hardware acceleration for GLX. In some
-cases, this may require acquiring a new graphics display card. Fortunately,
-relatively high-performance OpenGL-capable graphics cards are very inexpensive.
-
-There is also a utility mne_opengl_test to
-assess the graphics performance more quantitatively. This utility
-renders an inflated brain surface repeatedly, rotating it by 5 degrees
-around the *z* axis between redraws. At each
-revolution, the time spent for the full revolution is reported on
-the terminal window where mne_opengl_test was
-started from. The program renders the surface until the interrupt
-key (usually control-c) is pressed on the terminal window.
-
-mne_opengl_test is located
-in the ``bin`` directory and is thus started as:
-
-``$MNE_ROOT/bin/mne_opengl_test``
-
-On the fastest graphics cards, the time per revolution is
-well below 1 second. If this time is longer than 10 seconds, either
-the graphics hardware acceleration is not in effect or you need
-a faster graphics adapter.
-
-Obtain FreeSurfer
-#################
-
-The MNE software relies on the FreeSurfer software for cortical
-surface reconstruction and other MRI-related tasks. Please consult
-the FreeSurfer home page at ``http://surfer.nmr.mgh.harvard.edu/``.
-
-How to get started
-##################
-
-After you have installed the software, a good place to start
-is to look at the manual:
-
-- Source the correct setup script, see :ref:`user_environment`,
-  and
-
-- Say: ``mne_view_manual`` .
-
-Chapters of interest for a novice user include:
-
-- :ref:`CHDDEFAB` and :ref:`CHDBAFGJ` contain an introduction
-  to the software and setup instructions.
-
-- :ref:`ch_cookbook` is an overview of the necessary steps to
-  compute the cortically constrained minimum-norm solutions.
-
-- :ref:`ch_sample_data` is a hands-on exercise demonstrating analysis
-  of the sample data set.
-
-- :ref:`ch_reading` contains a list of useful references for
-  understanding the methods implemented in the MNE software.
diff --git a/doc/source/manual/convert.rst b/doc/source/manual/convert.rst
deleted file mode 100644
index 67c5f45..0000000
--- a/doc/source/manual/convert.rst
+++ /dev/null
@@ -1,2312 +0,0 @@
-
-
-.. _ch_convert:
-
-===============
-Data conversion
-===============
-
-Overview
-########
-
-This Chapter describes the data conversion utilities included
-with the MNE software.
-
-.. _BEHIAADG:
-
-Importing data from other MEG/EEG systems
-#########################################
-
-This section describes the utilities to convert data from
-other MEG/EEG systems into the fif format.
-
-Importing 4-D Neuroimaging data
-===============================
-
-The newest version of the 4-D Magnes software includes the possibility
-to export data in fif format. Please consult the documentation of the Magnes
-system for details of this export utility. However, the exported
-fif file does not include information about the compensation channels
-and the weights to be applied to realize software gradient compensation.
-To augment the Magnes fif files with the necessary information,
-the MNE software includes the utilities mne_insert_4D_comp,
-mne_create_comp_data, and mne_add_to_meas_info.
-
-As a result, the complete 4D Magnes data conversion process
-involves the following steps (a command-level sketch follows the list):
-
-- Export the raw data fif file from the
-  Magnes system.
-
-- If the data comes from a Magnes system where the primary (helmet) sensors
-  are gradiometers instead of magnetometers, run mne_fix_mag_coil_types with
-  the ``--magnes`` option to correct the channel information
-  in the file, see :ref:`CHDGAAJC`.
-
-- Export a text file containing the Magnes compensation sensor
-  data.
-
-- Create a text file containing the appropriate compensation
-  channel weights.
-
-- Run mne_insert_4D_comp with
-  the files created in the first two steps to merge compensation channel
-  data with the original Magnes fif file.
-
-- Run mne_create_comp_data on
-  the file created in step 3. to make a fif file containing the compensation
-  weights.
-
-- Run mne_add_to_meas_info with
-  the fif files created in steps 4. and 5. as input to result in a
-  complete fif file containing all the necessary data.
-
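-A minimal sketch of this pipeline, assuming hypothetical file names;
-the file-argument syntax of mne_fix_mag_coil_types and the option names
-of mne_add_to_meas_info are not documented in this chapter and are shown
-here only as placeholders to be checked against each utility's help
-output::
-
-    # correct the channel info if the primary sensors are gradiometers
-    # (file argument syntax assumed)
-    mne_fix_mag_coil_types --magnes magnes_raw.fif
-
-    # merge the exported compensation channel signals (ref_data.txt)
-    mne_insert_4D_comp --in magnes_raw.fif --ref ref_data.txt --out magnes_comp_raw.fif
-
-    # build a fif file containing the compensation weights (comp_weights.txt)
-    mne_create_comp_data --in comp_weights.txt --out magnes-comp.fif
-
-    # add the weights to the measurement info (option names assumed)
-    mne_add_to_meas_info --in magnes_comp_raw.fif --add magnes-comp.fif
-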
-.. note:: Including the compensation channel data is recommended but not mandatory. If the data saved in the Magnes system are already compensated, there will be a small error in the forward calculations whose significance has not been evaluated carefully at this time.
-
-.. _BEHDEBCH:
-
-Importing CTF data
-==================
-
-The MNE software includes a utility mne_ctf2fiff,
-based on the BrainStorm Matlab code by Richard Leahy, John Mosher,
-and Sylvain Baillet, to convert data in a CTF ds directory to fif
-format.
-
-The command-line options of mne_ctf2fiff are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---verbose**
-
-    Produce a verbose listing of the conversion process to stdout.
-
-**\---ds <*directory*>**
-
-    Read the data from this directory.
-
-**\---omit <*filename*>**
-
-    Read the names of channels to be omitted from this text file. Enter one
-    channel name per line. The names should match exactly with those
-    listed in the CTF data structures. By default, all channels are included.
-
-**\---fif <*filename*>**
-
-    The name of the output file. If the length of the raw data exceeds
-    the 2-GByte fif file limit, several output files will be produced.
-    These additional 'extension' files will be tagged
-    with ``_001.fif`` , ``_002.fif`` , etc.
-
-**\---evoked**
-
-    Produce an evoked-response fif file instead of a raw data file.
-    Each trial in the CTF data file is included as a separate category
-    (condition). The maximum number of samples in each trial is limited
-    to 25000.
-
-**\---infoonly**
-
-    Write only the measurement info to the output file, do not include data.
-
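-For example, a basic conversion of a CTF ds directory into a raw data
-fif file (file names here are hypothetical)::
-
-    mne_ctf2fiff --ds subj01.ds --fif subj01_raw.fif
-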
-During conversion, the following files are consulted from
-the ds directory:
-
-** <*name*> .res4**
-
-    This file contains most of the header information pertaining to the acquisition.
-
-** <*name*> .hc**
-
-    This file contains the HPI coil locations in sensor and head coordinates.
-
-** <*name*> .meg4**
-
-    This file contains the actual MEG data. If the data are split across several
-    files due to the 2-GByte file size restriction, the 'extension' files
-    are called <*name*> ``.`` <*number*> ``_meg4`` .
-
-** <*name*> .eeg**
-
-    This is an optional input file containing the EEG electrode locations. More
-    details are given below.
-
-If the <*name*> ``.eeg`` file,
-produced from the Polhemus data file with CTF software, is present,
-it is assumed to contain lines with the format:
-
- <*number*> <*name*> <*x/cm*> <*y/cm*> <*z/cm*>
-
-The field <*number*> is
-a sequential number to be assigned to the converted data point in
-the fif file. <*name*> is either
-a name of an EEG channel, one of ``left``, ``right``,
-or ``nasion`` to indicate a fiducial landmark, or any word
-which is not a name of any channel in the data. If <*name*> is
-a name of an EEG channel available in the data, the location is
-included in the Polhemus data as an EEG electrode location and
-inserted as the location of that EEG electrode. If the name is one
-of the fiducial landmark names, the point is included in the Polhemus
-data as a fiducial landmark. Otherwise, the point is included as
-an additional head surface point.
-
-The standard ``eeg`` file produced by CTF software
-does not contain the fiducial locations. If desired, they can be
-manually copied from the ``pos`` file which was the source
-of the ``eeg`` file.
-
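-For illustration, the beginning of such a file might look like this
-(names and coordinates are hypothetical, in centimeters)::
-
-    1 nasion  10.2   0.0   0.0
-    2 left     0.0   7.1   0.0
-    3 right    0.0  -7.3   0.0
-    4 Fp1      9.1   3.0  11.2
-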
-.. note:: In newer CTF data the EEG position information may be present in the ``res4`` file. If the ``eeg`` file is present, the positions given there take precedence over the information in the ``res4`` file.
-
-.. note:: mne_ctf2fiff converts both epoch mode and continuous raw data files into raw data fif files. It is not advisable to use epoch mode files with time gaps between the epochs because the data will be discontinuous in the resulting fif file, with jumps at the junctions between epochs. These discontinuities produce artefacts if the raw data is filtered in mne_browse_raw, mne_process_raw, or graph.
-
-.. note:: The conversion process includes a transformation from the CTF head coordinate system convention to that used in the Neuromag systems.
-
-.. _BEHBABFA:
-
-Importing CTF Polhemus data
-===========================
-
-The CTF MEG systems store the Polhemus digitization data
-in text files. The utility mne_ctf_dig2fiff was
-created to convert these data files into the fif and hpts formats.
-
-The input data to mne_ctf_dig2fiff is
-a text file, which contains the coordinates of the digitization
-points in centimeters. The first line should contain a single number
-which is the number of points listed in the file. Each of the following
-lines contains a sequential number of the point, followed by the
-three coordinates. mne_ctf_dig2fiff ignores
-any text following the :math:`z` coordinate
-on each line. If the ``--numfids`` option is specified,
-the first three points indicate the three fiducial locations (1
-= nasion, 2 = left auricular point, 3 = right auricular point).
-Otherwise, the input file must end with three lines beginning with ``left``,
-``right``, or ``nasion`` to indicate the locations of the corresponding
-fiducial landmarks.
-
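-For illustration, an input file using the ``--numfids`` convention, where
-the first three points are the nasion and the left and right auricular
-points (coordinates are hypothetical, in centimeters)::
-
-    4
-    1 10.1  0.0  0.0
-    2  0.0  7.2  0.0
-    3  0.0 -7.0  0.0
-    4  9.1  3.2 11.0
-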
-.. note:: The sequential numbers should be unique within a file. In particular, the numbers 1, 2, and 3 must not appear more than once if the ``--numfids`` option is used.
-
-The command-line options for mne_ctf_dig2fiff are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---dig <*name*>**
-
-    Specifies the input data file in CTF output format.
-
-**\---numfids**
-
-    Fiducial locations are numbered instead of labeled, see above.
-
-**\---hpts <*name*>**
-
-    Specifies the output hpts file. The format of this text file is
-    described in :ref:`CJADJEBH`.
-
-**\---fif <*name*>**
-
-    Specifies the output fif file.
-
-.. _BEHDDFBI:
-
-Applying software gradient compensation
-=======================================
-
-Since the software gradient compensation employed in CTF
-systems is a reversible operation, it is possible to change the
-compensation status of CTF data in the data files as desired. This
-section contains information about the technical details of the
-compensation procedure and a description of mne_compensate_data ,
-which is a utility to change the software gradient compensation
-state in evoked-response data files.
-
-The fif files containing CTF data converted using the utility mne_ctf2fiff contain
-several compensation matrices which are employed to suppress external disturbances
-with help of the reference channel data. The reference sensors are
-located further away from the brain than the helmet sensors and
-are thus measuring mainly the external disturbances rather than magnetic
-fields originating in the brain. Most often, a compensation matrix
-corresponding to a scheme nicknamed *Third-order gradient
-compensation* is employed.
-
-Let us assume that the data contain :math:`n_1` MEG
-sensor channels, :math:`n_2` reference sensor
-channels, and :math:`n_3` other channels.
-The data from all channels can be concatenated into a single vector
-
-.. math::    x = [x_1^T x_2^T x_3^T]^T\ ,
-
-where :math:`x_1`, :math:`x_2`,
-and :math:`x_3` are the data vectors corresponding
-to the MEG sensor channels, reference sensor channels, and other
-channels, respectively. The data before and after compensation,
-denoted here by :math:`x_{(0)}` and :math:`x_{(k)}`, respectively,
-are related by
-
-.. math::    x_{(k)} = M_{(k)} x_{(0)}\ ,
-
-where the composite compensation matrix is
-
-.. math::    M_{(k)} = \begin{bmatrix}
-		I_{n_1} & C_{(k)} & 0 \\
-		0 & I_{n_2} & 0 \\
-		0 & 0 & I_{n_3}
-		\end{bmatrix}\ .
-
-In the above, :math:`C_{(k)}` is an :math:`n_1` by :math:`n_2` compensation
-data matrix corresponding to compensation "grade" :math:`k`.
-It is easy to see that
-
-.. math::    M_{(k)}^{-1} = \begin{bmatrix}
-		I_{n_1} & -C_{(k)} & 0 \\
-		0 & I_{n_2} & 0 \\
-		0 & 0 & I_{n_3}
-		\end{bmatrix}\ .
-
-To convert from compensation grade :math:`k` to :math:`p`, one
-can simply multiply the inverse of one compensation matrix
-by another and apply the product to the data:
-
-.. math::    x_{(k)} = M_{(k)} M_{(p)}^{-1} x_{(p)}\ .
-
-This operation is performed by mne_compensate_data ,
-which has the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in <*name*>**
-
-    Specifies the input data file.
-
-**\---out <*name*>**
-
-    Specifies the output data file.
-
-**\---grad <*number*>**
-
-    Specifies the desired compensation grade in the output file. The value
-    can be 1, 2, 3, or 101. The values starting from 101 will be used
-    for 4D Magnes compensation matrices.
-
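-For example, to change a hypothetical evoked-response file to third-order
-gradient compensation::
-
-    mne_compensate_data --in subj01-ave.fif --out subj01-comp3-ave.fif --grad 3
-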
-.. note:: Only average data is included in the output. Evoked-response data files produced with mne_browse_raw or mne_process_raw may include standard errors of the mean, which cannot be re-compensated using the above method and are thus omitted.
-
-.. note:: Raw data cannot be compensated using mne_compensate_data. For this purpose, load the data to mne_browse_raw or mne_process_raw, specify the desired compensation grade, and save a new raw data file.
-
-.. _BEHGDDBH:
-
-Importing Magnes compensation channel data
-==========================================
-
-At present, it is not possible to include reference channel
-data to fif files containing 4D Magnes data directly using the conversion
-utilities available for the Magnes systems. However, it is possible
-to export the compensation channel signals in text format and merge
-them with the MEG helmet channel data using mne_insert_4D_comp .
-This utility has the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in <*name*>**
-
-    Specifies the input fif file containing the helmet sensor data.
-
-**\---out <*name*>**
-
-    Specifies the output fif file which will contain both the helmet
-    sensor data and the compensation channel data.
-
-**\---ref <*name*>**
-
-    Specifies a text file containing the reference sensor data.
-
-Each line of the reference sensor data file contains the
-following information:
-
-**epoch #**
-
-    is always one,
-
-**time/s**
-
-    time point of this sample,
-
-**data/T**
-
-    the reference channel data values.
-
-The standard locations of the MEG (helmet) and compensation
-sensors in a Magnes WH3600 system are listed in
-``$MNE_ROOT/share/mne/Magnes_WH3600.pos``. mne_insert_4D_comp matches
-the helmet sensor positions in this file with those present in the
-input data file and transforms the standard compensation channel
-locations accordingly to be included in the output. Since a standard
-position file is only provided for the Magnes WH3600, mne_insert_4D_comp
-only works for that type of system.
-
-The fif files exported from the Magnes systems may contain
-a slightly smaller number of samples than originally acquired because
-the total number of samples may not be evenly divisible by a reasonable
-number of samples which will be used as the fif raw data file buffer
-size. Therefore, the reference channel data may contain more samples
-than the fif file. The superfluous samples will be omitted from
-the end.
-
-.. _BEHBIIFF:
-
-Creating software gradient compensation data
-============================================
-
-The utility mne_create_comp_data was
-written to create software gradient compensation weight data for
-4D Magnes fif files. This utility takes a text file containing the
-compensation data as input and writes the corresponding fif file
-as output. This file can be merged into the fif file containing
-4D Magnes data with the utility mne_add_to_meas_info .
-
-The command line options of mne_create_comp_data are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in <*name*>**
-
-    Specifies the input text file containing the compensation data.
-
-**\---kind <*value*>**
-
-    The compensation type to be stored in the output file with the data. This
-    value defaults to 101 for the Magnes compensation and does not need
-    to be changed.
-
-**\---out <*name*>**
-
-    Specifies the output fif file containing the compensation channel weight
-    matrix :math:`C_{(k)}`, see :ref:`BEHDDFBI`.
-
-The format of the text-format compensation data file is:
-
- <*number of MEG helmet channels*> <*number of compensation channels included*>
- <*cname_1*> <*cname_2*> ...
- <*name_1*> <*weights*>
- <*name_2*> <*weights*> ...
-
-In the above <*name_k*> denote
-names of MEG helmet channels and <*cname_k*>
-those of the compensation channels, respectively. If the channel
-names contain spaces, they must be surrounded by quotes, for example, ``"MEG 0111"``.
-
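-For illustration, a file describing weights for two helmet channels and
-three compensation channels might look like this (channel names and
-weight values are hypothetical)::
-
-    2 3
-    BR1 BR2 BR3
-    "MEG 0111" 0.0101 -0.0035 0.0024
-    "MEG 0112" 0.0078  0.0012 -0.0019
-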
-.. _BEHBJGGF:
-
-Importing KIT MEG system data
-=============================
-
-The utility mne_kit2fiff was
-created in collaboration with Alec Maranz and Asaf Bachrach to import
-their MEG data acquired with the 160-channel KIT MEG system to MNE
-software.
-
-To import the data, the following input files are mandatory:
-
-- The Polhemus data file (elp file)
-  containing the locations of the fiducials and the head-position
-  indicator (HPI) coils. These data are usually given in the CTF/4D
-  head coordinate system. However, mne_kit2fiff does
-  not rely on this assumption. This file can be exported directly from
-  the KIT system.
-
-- A file containing the locations of the HPI coils in the MEG
-  device coordinate system. These data are used together with the elp file
-  to establish the coordinate transformation between the head and
-  device coordinate systems. This file can be produced easily by manually
-  editing one of the files exported by the KIT system.
-
-- A sensor data file (sns file)
-  containing the locations and orientations of the sensors. This file
-  can be exported directly from the KIT system.
-
-.. note:: The output fif file will use the Neuromag head    coordinate system convention, see :ref:`BJEBIBAI`. A coordinate    transformation between the CTF/4D head coordinates and the Neuromag    head coordinates is included. This transformation can be read with    MNE Matlab Toolbox routines, see :ref:`ch_matlab`.
-
-The following input files are optional:
-
-- A head shape data file (hsp file)
-  containing locations of additional points from the head surface.
-  These points must be given in the same coordinate system as that
-  used for the elp file and the
-  fiducial locations must be within 1 mm from those in the elp file.
-
-- A raw data file containing the raw data values, sample by
-  sample, as text. If this file is not specified, the output fif file
-  will only contain the measurement info block.
-
-By default mne_kit2fiff includes
-the first 157 channels, assumed to be the MEG channels, in the output
-file. The compensation channel data are not converted by default
-but can be added, together with other channels, with the ``--add`` option.
-The channels from 160 onwards are designated as miscellaneous input
-channels (MISC 001, MISC 002, etc.). The channel names and types
-of these channels can be changed afterwards with the mne_rename_channels utility,
-see :ref:`CHDCFEAJ`. In addition, it is possible to synthesize
-the digital trigger channel (STI 014) from available analog
-trigger channel data, see the ``--stim`` option, below.
-The synthesized trigger channel data value at sample :math:`k` will
-be:
-
-.. math::    s(k) = \sum_{p = 1}^n {t_p(k) 2^{p - 1}}\ ,
-
-where :math:`t_p(k)` are the thresholded values derived
-from the input channel data :math:`d_p(k)`:
-
-.. math::    t_p(k) = \Bigg\{ \begin{array}{l}
-		 0 \text{  if  } d_p(k) \leq t\\
-		 1 \text{  if  } d_p(k) > t
-	     \end{array}\ .
-
-The threshold value :math:`t` can
-be adjusted with the ``--stimthresh`` option, see below.
-
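-As an illustrative example: if three trigger channels are specified and,
-at sample :math:`k`, channels 1 and 3 exceed the threshold while channel 2
-does not, the synthesized value is :math:`s(k) = 2^0 + 2^2 = 5`.
-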
-mne_kit2fiff accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---elp <*filename*>**
-
-    The name of the file containing the locations of the fiducials and
-    the HPI coils. This option is mandatory.
-
-**\---hsp <*filename*>**
-
-    The name of the file containing the locations of the fiducials and additional
-    points on the head surface. This file is optional.
-
-**\---sns <*filename*>**
-
-    The name of the file containing the sensor locations and orientations. This
-    option is mandatory.
-
-**\---hpi <*filename*>**
-
-    The name of a text file containing the locations of the HPI coils
-    in the MEG device coordinate frame, given in millimeters. The order of
-    the coils in this file does not have to be the same as that in the elp file.
-    This option is mandatory.
-
-**\---raw <*filename*>**
-
-    Specifies the name of the raw data file. If this file is not specified, the
-    output fif file will only contain the measurement info block.
-
-**\---sfreq <*value/Hz*>**
-
-    The sampling frequency of the data. If this option is not specified, the
-    sampling frequency defaults to 1000 Hz.
-
-**\---lowpass <*value/Hz*>**
-
-    The lowpass filter corner frequency used in the data acquisition.
-    If not specified, this value defaults to 200 Hz.
-
-**\---highpass <*value/Hz*>**
-
-    The highpass filter corner frequency used in the data acquisition.
-    If not specified, this value defaults to 0 Hz (DC recording).
-
-**\---out <*filename*>**
-
-    Specifies the name of the output fif format data file. If this file
-    is not specified, no output is produced but the elp , hpi ,
-    and hsp files are processed normally.
-
-**\---stim <*chs*>**
-
-    Specifies a colon-separated list of numbers of channels to be used
-    to synthesize a digital trigger channel. These numbers refer to
-    the scanning order channels as listed in the sns file,
-    starting from one. The digital trigger channel will be the last
-    channel in the file. If this option is absent, the output file will
-    not contain a trigger channel.
-
-**\---stimthresh <*value*>**
-
-    The threshold value used when synthesizing the digital trigger channel,
-    see above. Defaults to 1.0.
-
-**\---add <*chs*>**
-
-    Specifies a colon-separated list of numbers of channels to include between
-    the 157 default MEG channels and the digital trigger channel. These
-    numbers refer to the scanning order channels as listed in the sns file,
-    starting from one.
-
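-Putting the mandatory options together, a conversion might look like this
-(file names and channel numbers are hypothetical)::
-
-    mne_kit2fiff --elp subj.elp --hpi hpi_device.txt --sns sensors.sns \
-        --raw data.txt --stim 158:159 --out subj_raw.fif
-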
-.. note:: The mne_kit2fiff utility has not been extensively tested yet.
-
-.. _BABHDBBD:
-
-Importing EEG data saved in the EDF, EDF+, or BDF format
-========================================================
-
-Overview
---------
-
-The mne_edf2fiff utility allows
-conversion of EEG data from the EDF, EDF+, and BDF formats to the fif
-format. Documentation for these three input formats can be found
-at:
-
-**EDF:**
-
-    http://www.edfplus.info/specs/edf.html
-
-**EDF+:**
-
-    http://www.edfplus.info/specs/edfplus.html
-
-**BDF:**
-
-    http://www.biosemi.com/faq/file_format.htm
-
-EDF (European Data Format) and EDF+ are 16-bit formats while
-BDF is a 24-bit variant of this format used by the EEG systems manufactured
-by a company called BioSemi.
-
-None of these formats supports electrode location information
-or head shape digitization information. Therefore, this information
-has to be provided separately. Presently the hpts and elp file formats
-are supported for including digitization data. For information on these
-formats, see :ref:`CJADJEBH` and http://www.sourcesignal.com/formats_probe.html.
-Note that it is mandatory to have the three fiducial locations (nasion
-and the two auricular points) included in the digitization data.
-Using the locations of the fiducial points, the digitization data
-are converted to the MEG head coordinate system employed in the
-MNE software, see :ref:`BJEBIBAI`. In the comparison of the
-channel names, only the initial segment up to the first '-' (dash)
-in the EDF/EDF+/BDF channel name is significant.
-
-The EDF+ files may contain an annotation channel which can
-be used to store trigger information. The Time-stamped Annotation
-Lists (TALs) on the annotation data can be converted to a trigger
-channel (STI 014) using an annotation map file which associates
-an annotation label with a number on the trigger channel. The TALs
-can be listed with the ``--tal`` option,
-see below.
-
-.. warning:: The data samples in a BDF file are represented in a 3-byte (24-bit) format. Since 3-byte raw data buffers are not presently supported in the fif format, these data will be changed to 4-byte integers in the conversion. Since the maximum size of a fif file is 2 GBytes, the maximum size of a BDF file to be converted is approximately 1.5 GBytes.
-
-.. warning:: The EDF/EDF+/BDF formats support channel-dependent sampling rates. This feature is not supported by mne_edf2fiff. However, the annotation channel in the EDF+ format can have a different sampling rate. The annotation channel data are not included in the output fif files.
-
-Using mne_edf2fiff
-------------------
-
-The command-line options of mne_edf2fiff are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---edf <*filename*>**
-
-    Specifies the name of the raw data file to process.
-
-**\---tal <*filename*>**
-
-    List the time-stamped annotation list (TAL) data from an EDF+ file
-    into this file.
-    This output is useful to assist in creating the annotation map file,
-    see the ``--annotmap`` option, below.
-    This output file is an event file compatible with mne_browse_raw and
-    mne_process_raw, see :ref:`ch_browse`. In addition, the mapping between
-    TAL labels and trigger numbers provided by the ``--annotmap`` option is
-    employed to assign trigger numbers in the event file produced. In
-    the absence of the ``--annotmap`` option, the default trigger number 1024
-    is used.
-
-**\---annotmap <*filename*>**
-
-    Specify a file which maps the labels of the TALs to numbers on a trigger
-    channel (STI 014) which will be added to the output file if this
-    option is present. This annotation map file
-    may contain comment lines starting with the '%' or '#' characters.
-    The data lines contain a label-number pair, separated by a colon.
-    For example, a line 'Trigger-1:9' means that each
-    annotation labeled with the text 'Trigger-1' will
-    be translated to the number 9 on the trigger channel.
-
-**\---elp <*filename*>**
-
-    Specifies the name of an electrode location file. This file
-    is in the "probe" file format used by the *Source
-    Signal Imaging, Inc.* software. For a description of the
-    format, see http://www.sourcesignal.com/formats_probe.html. Note
-    that some other software packages may produce electrode-position
-    files with the elp ending not
-    conforming to the above specification. As discussed above, the fiducial
-    marker locations, optional in the "probe" file
-    format specification, are mandatory for mne_edf2fiff.
-    When this option is encountered on the command line, any previously
-    specified hpts file will be ignored.
-
-**\---hpts <*filename*>**
-
-    Specifies the name of an electrode position file in the hpts format discussed
-    in :ref:`CJADJEBH`. The mandatory entries are the fiducial marker
-    locations and the EEG electrode locations. It is recommended that
-    electrode (channel) names instead of numbers are used to label the
-    EEG electrode locations. When this option is encountered on the
-    command line, any previously specified elp file
-    will be ignored.
-
-**\---meters**
-
-    Assumes that the digitization data in an hpts file
-    are given in meters instead of millimeters.
-
-**\---fif <*filename*>**
-
-    Specifies the name of the fif file to be output.
-
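-For example, converting a BDF file with electrode locations given in an
-hpts file (file names are hypothetical)::
-
-    mne_edf2fiff --edf subj01.bdf --hpts subj01.hpts --fif subj01_raw.fif
-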
-Post-conversion tasks
----------------------
-
-This section outlines additional steps to be taken after
-an EDF/EDF+/BDF file has been converted to the fif format for use in MNE:
-
-- Some of the channels may not have a
-  digitized electrode location associated with them. If these channels
-  are used for EOG or EMG measurements, their channel types should
-  be changed to the correct ones using the mne_rename_channels utility,
-  see :ref:`CHDCFEAJ`. EEG channels which do not have a location
-  associated with them should be assigned to be MISC channels.
-
-- After the channel types are correctly defined, a topographical
-  layout file can be created for mne_browse_raw and mne_analyze using
-  the mne_make_eeg_layout utility,
-  see :ref:`CHDDGDJA`.
-
-- The trigger channel name in BDF files is "Status".
-  This must be specified with the ``--digtrig`` option or with help of
-  the MNE_TRIGGER_CH_NAME environment variable when mne_browse_raw or mne_process_raw is
-  invoked, see :ref:`BABBGJEA`.
-
-- Only the two least significant bytes on the "Status" channel
-  of BDF files are significant as trigger information. The
-  ``--digtrigmask 0xff`` option or the MNE_TRIGGER_CH_MASK environment
-  variable should be used to specify this to mne_browse_raw and
-  mne_process_raw, see :ref:`BABBGJEA`.
-
-.. _BEHDGAIJ:
-
-Importing EEG data saved in the Tufts University format
-=======================================================
-
-The utility mne_tufts2fiff was
-created in collaboration with Phillip Holcomb and Annette Schmid
-from Tufts University to import their EEG data to the MNE software.
-
-The Tufts EEG data is included in three files:
-
-- The raw data file containing the acquired
-  EEG data. The name of this file ends with the suffix ``.raw`` .
-
-- The calibration raw data file. This file contains known calibration
-  signals and is required to bring the data to physical units. The
-  name of this file ends with the suffix ``c.raw`` .
-
-- The electrode location information file. The name of this
-  file ends with the suffix ``.elp`` .
-
-The utility mne_tufts2fiff has
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---raw <*filename*>**
-
-    Specifies the name of the raw data file to process.
-
-**\---cal <*filename*>**
-
-    The name of the calibration data file. If calibration data are missing, the
-    calibration coefficients will be set to unity.
-
-**\---elp <*filename*>**
-
-    The name of the electrode location file. If this file is missing,
-    the electrode locations will be unspecified. This file is in the "probe" file
-    format used by the *Source Signal Imaging, Inc.* software.
-    For a description of the format, see http://www.sourcesignal.com/formats_probe.html.
-    The fiducial marker locations, optional in the "probe" file
-    format specification, are mandatory for mne_tufts2fiff. Note
-    that some other software packages may produce electrode-position
-    files with the elp ending not
-    conforming to the above specification.
-
-.. note::
-
-    The conversion process includes a transformation from the Tufts head
-    coordinate system convention to that used in the Neuromag systems.
-
-.. note::
-
-    The fiducial landmark locations, optional in the probe file format,
-    must be present for mne_tufts2fiff.
-
-.. _BEHCCCDC:
-
-Importing BrainVision EEG data
-==============================
-
-The utility mne_brain_vision2fiff was
-created to import BrainVision EEG data. This utility also helps
-to import the eXimia (Nexstim) TMS-compatible EEG system data to
-the MNE software. The utility uses an optional fif file containing
-the head digitization data to allow source modeling. The MNE Matlab
-toolbox contains the function fiff_write_dig_file to
-write a digitization file based on digitization data available in
-another format, see :ref:`ch_matlab`.
-
-.. note::
-
-    mne_brain_vision2fiff reads events from the ``vmrk`` file referenced in the
-    ``vhdr`` file, but it only includes events whose "Type" is ``Stimulus`` and
-    whose "description" is given by ``S<number>``. All other events are ignored.
-
-
-The command-line options of mne_brain_vision2fiff are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---header <*name*>**
-
-    The name of the BrainVision header file. The extension of this file
-    is ``vhdr`` . The header file typically refers to a marker
-    file (``vmrk`` ) which is automatically processed and a
-    digital trigger channel (STI 014) is formed from the marker information.
-    The ``vmrk`` file is ignored if the ``--eximia`` option
-    is present.
-
-**\---dig <*name*>**
-
-    The name of the fif file containing the digitization data.
-
-**\---orignames**
-
-    Use the original EEG channel labels. If this option is absent, the EEG
-    channels will be automatically renamed to EEG 001, EEG 002, *etc.*
-
-**\---eximia**
-
-    Interpret this as an eXimia data file. The first three channels
-    will be thresholded and interpreted as trigger channels. The composite
-    digital trigger channel will be composed in the same way as in the mne_kit2fiff utility,
-    see :ref:`BEHBJGGF`, above. In addition, the fourth channel
-    will be assigned as an EOG channel. This option is normally used
-    by the mne_eximia2fiff script,
-    see :ref:`BEHGCEHH`.
-
-**\---split <*size/MB*>**
-
-    Split the output data into several files which are no more than <*size*> MB.
-    By default, the output is split into files which are just below
-    2 GB so that the fif file maximum size is not exceeded.
-
-**\---out <*filename*>**
-
-    Specifies the name of the output fif format data file. If <*filename*> ends
-    with ``.fif`` or ``_raw.fif`` , these endings are
-    deleted. After these modifications, ``_raw.fif`` is inserted
-    after the remaining part of the file name. If the file is split
-    into multiple parts, the additional parts will be called
-    <*name*> ``-`` <*number*> ``_raw.fif`` .
-
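-For example (a hypothetical file name)::
-
-    mne_brain_vision2fiff --header subj01.vhdr --out subj01
-
-With the naming rules above, this would produce ``subj01_raw.fif``, with
-the digital trigger channel formed from the marker file referenced in
-``subj01.vhdr``.
-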
-.. _BEHGCEHH:
-
-Converting eXimia EEG data
-==========================
-
-EEG data from the Nexstim eXimia system can be converted
-to the fif format with help of the mne_eximia2fiff script.
-It creates a BrainVision ``vhdr`` file and calls mne_brain_vision2fiff.
-Usage:
-
-``mne_eximia2fiff`` [``--dig`` dfile ] [``--orignames`` ] file1 file2 ...
-
-where file1 file2 ...
-are eXimia ``nxe`` files and the ``--orignames`` option
-is passed on to mne_brain_vision2fiff .
-If you want to convert all data files in a directory, say
-
-``mne_eximia2fiff *.nxe``
-
-The optional file specified with the ``--dig`` option is assumed
-to contain digitizer data from the recording in the Nexstim format.
-The resulting fif data file will contain these data converted to
-the fif format as well as the coordinate transformation between
-the eXimia digitizer and MNE head coordinate systems.
-
-.. note:: This script converts raw data files only.
-
-.. _BABCJEAD:
-
-Converting digitization data
-############################
-
-The mne_convert_dig_data utility
-converts Polhemus digitization data between different file formats.
-The input formats are:
-
-**fif**
-
-    The
-    standard format used in MNE. The digitization data are typically
-    present in the measurement files.
-
-**hpts**
-
-    A text format which is a translation
-    of the fif format data, see :ref:`CJADJEBH` below.
-
-**elp**
-
-    A text format produced by the *Source
-    Signal Imaging, Inc.* software. For description of this "probe" format,
-    see http://www.sourcesignal.com/formats_probe.html.
-
-The data can be output in fif and hpts formats.
-Only the last command-line option specifying an input file will
-be honored. Zero or more output file options can be present on the
-command line.
-
-.. note:: The elp and hpts input files may contain textual EEG electrode labels. They will not be copied to the fif format output.
-
-The command-line options of mne_convert_dig_data are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fif <*name*>**
-
-    Specifies the name of an input fif file.
-
-**\---hpts <*name*>**
-
-    Specifies the name of an input hpts file.
-
-**\---elp <*name*>**
-
-    Specifies the name of an input elp file.
-
-**\---fifout <*name*>**
-
-    Specifies the name of an output fif file.
-
-**\---hptsout <*name*>**
-
-    Specifies the name of an output hpts file.
-
-**\---headcoord**
-
-    The fif and hpts input
-    files are assumed to contain data in the MNE head coordinate system,
-    see :ref:`BJEBIBAI`. With this option present, the data are
-    transformed to the MNE head coordinate system with help of the fiducial
-    locations in the data. Use this option if this is not the case or
-    if you are unsure about the definition of the coordinate system
-    of the fif and hpts input
-    data. This option is implied with elp input
-    files. If this option is present, the fif format output file will contain
-    the transformation between the original digitizer data coordinates and
-    the MNE head coordinate system.
-
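-For example, to extract the digitization data from a measurement file
-into an hpts file (file names are hypothetical)::
-
-    mne_convert_dig_data --fif subj01_raw.fif --hptsout subj01_dig.hpts
-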
-.. _CJADJEBH:
-
-The hpts format
-===============
-
-The hpts format digitizer
-data file may contain comment lines starting with the pound sign
-(#) and data lines of the form:
-
- <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
-
-where
-
-** <*category*>**
-
-    defines the type of points. Allowed categories are: hpi, cardinal
-    (fiducial), eeg, and extra, corresponding to head-position
-    indicator coil locations, cardinal landmarks, EEG electrode locations,
-    and additional head surface points, respectively. Note that tkmedit does not
-    recognize fiducial as an
-    alias for cardinal.
-
-** <*identifier*>**
-
-    identifies the point. The identifiers are usually sequential numbers. For
-    cardinal landmarks, 1 = left auricular point, 2 = nasion, and 3
-    = right auricular point. For EEG electrodes, identifier = 0 signifies
-    the reference electrode. Some programs (not tkmedit )
-    accept electrode labels as identifiers in the eeg category.
-
-** <*x/mm*>, <*y/mm*>, <*z/mm*>**
-
-    Location of the point, usually in the MEG head coordinate system, see :ref:`BJEBIBAI`.
-    Some programs have options to accept coordinates in meters instead
-    of millimeters. With the ``--meters`` option, mne_transform_points lists
-    the coordinates in meters.
-
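-For illustration, a short hpts file might look like this (coordinates are
-hypothetical, in millimeters; the electrode label in the eeg category is
-accepted by some programs only, as noted above)::
-
-    # cardinal landmarks: 1 = left auricular, 2 = nasion, 3 = right auricular
-    cardinal 1 -75.0   0.0   0.0
-    cardinal 2   0.0 102.0   0.0
-    cardinal 3  74.0   0.0   0.0
-    hpi 1       55.2  31.4  12.0
-    eeg Fp1     30.5  91.1  60.2
-    extra 1      0.0   0.0 140.0
-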
-.. _BEHDEJEC:
-
-Converting volumetric data into an MRI overlay
-##############################################
-
-With help of the mne_volume_source_space utility
-(:ref:`BJEFEHJI`) it is possible to create a source space which
-is defined within a volume rather than a surface. If the ``--mri`` option
-was used in mne_volume_source_space , the
-source space file contains an interpolator matrix which performs
-a trilinear interpolation into the voxel space of the MRI volume
-specified.
-
-At present, the MNE software does not include facilities
-to compute volumetric source estimates. However, it is possible
-to calculate forward solutions in the volumetric grid and use the
-MNE Matlab toolbox to read the forward solution. It is then possible
-to compute, *e.g.*, volumetric beamformer solutions
-in Matlab and output the results into w or stc files.
-The purpose of mne_volume_data2mri is
-to produce MRI overlay data compatible with FreeSurfer MRI viewers
-(in the mgh or mgz formats) from such w or stc files.
-
-mne_volume_data2mri accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---src <*filename*>**
-
-    The name of the volumetric source space file created with mne_volume_source_space .
-    The source space must have been created with the ``--mri`` option,
-    which adds the appropriate sparse trilinear interpolator matrix
-    to the source space.
-
-**\---w <*filename*>**
-
-    The name of a w file to convert
-    into an MRI overlay.
-
-**\---stc <*filename*>**
-
-    The name of the stc file to convert
-    into an MRI overlay. If this file has many time frames, the output
-    file may be huge. Note: if both ``--w`` and ``--stc`` are
-    specified, ``--w`` takes precedence.
-
-**\---scale <*number*>**
-
-    Multiply the stc or w by
-    this scaling constant before producing the overlay.
-
-**\---out <*filename*>**
-
-    Specifies the name of the output MRI overlay file. The name must end
-    with either ``.mgh`` or ``.mgz`` identifying the
-    uncompressed and compressed FreeSurfer MRI formats, respectively.
-
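-For example, converting a w file into a compressed FreeSurfer overlay
-(file names are hypothetical)::
-
-    mne_volume_data2mri --src volume-src.fif --w beamformer.w --out overlay.mgz
-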
-.. _BEHBHIDH:
-
-Listing source space data
-#########################
-
-The utility mne_list_source_space outputs
-the source space information into text files suitable for loading
-into the Neuromag MRIlab software.
-
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---src <*name*>**
-
-    The source space to be listed. This can be either the output from mne_make_source_space
-    (``*src.fif``), output from the forward calculation (``*fwd.fif``), or
-    the output from the inverse operator decomposition (``*inv.fif``).
-
-**\---mri <*name*>**
-
-    A file containing the transformation between the head and MRI coordinates
-    is specified with this option. This file can be either a Neuromag
-    MRI description file, the output from the forward calculation (``*fwd.fif``),
-    or the output from the inverse operator decomposition (``*inv.fif``).
-    If this file is included, the output will be in head coordinates.
-    Otherwise the source space will be listed in MRI coordinates.
-
-**\---dip <*name*>**
-
-    Specifies the 'stem' for the Neuromag text format
-    dipole files to be output. Two files will be produced: <*stem*> -lh.dip
-    and <*stem*> -rh.dip. These correspond
-    to the left and right hemisphere part of the source space, respectively.
-    This source space data can be imported to MRIlab through the File/Import/Dipoles menu
-    item.
-
-**\---pnt <*name*>**
-
-    Specifies the 'stem' for Neuromag text format
-    point files to be output. Two files will be produced: <*stem*> -lh.pnt
-    and <*stem*> -rh.pnt. These correspond
-    to the left and right hemisphere part of the source space, respectively.
-    This source space data can be imported to MRIlab through the File/Import/Strings menu
-    item.
-
-**\---exclude <*name*>**
-
-    Exclude the source space points defined by the given FreeSurfer 'label' file
-    from the output. The name of the file should end with ``-lh.label``
-    if it refers to the left hemisphere and with ``-rh.label`` if
-    it lists points in the right hemisphere, respectively.
-
-**\---include <*name*>**
-
-    Include only the source space points defined by the given FreeSurfer 'label' file
-    to the output. The file naming convention is the same as described
-    above under the ``--exclude`` option. The 'include' labels are
-    processed before the 'exclude' labels.
-
-**\---all**
-
-    Include all nodes in the output files instead of only those active
-    in the source space. Note that the output files will be huge if
-    this option is active.
-
-.. _BEHBBEHJ:
-
-Listing BEM mesh data
-#####################
-
-The utility mne_list_bem outputs
-the BEM meshes in text format. The default output data contains
-the *x*, *y*, and *z* coordinates
-of the vertices, listed in millimeters, one vertex per line.
-
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---bem <*name*>**
-
-    The BEM file to be listed. The file name normally ends with -bem.fif or -bem-sol.fif .
-
-**\---out <*name*>**
-
-    The output file name.
-
-**\---id <*number*>**
-
-    Identify the surface to be listed. The surfaces are numbered starting with
-    the innermost surface. Thus, for a three-layer model the surface numbers
-    are: 4 = scalp, 3 = outer skull, 1 = inner skull.
-    The default value is 4.
-
-**\---gdipoli**
-
-    List the surfaces in the format required by Thom Oostendorp's
-    gdipoli program. This is also the default input format for mne_surf2bem .
-
-**\---meters**
-
-    List the surface coordinates in meters instead of millimeters.
-
-**\---surf**
-
-    Write the output in the binary FreeSurfer format.
-
-**\---xfit**
-
-    Write a file compatible with xfit. This is the same effect as using
-    the options ``--gdipoli`` and ``--meters`` together.
-
-.. _BEHDIAJG:
-
-Converting surface data between different formats
-#################################################
-
-The utility mne_convert_surface converts
-surface data files between different formats.
-
-.. note:: The MNE Matlab toolbox functions enable reading of FreeSurfer surface files directly. Therefore, the ``--mat`` option has been removed. The dfs file format conversion functionality has been moved here from mne_convert_dfs. Consequently, mne_convert_dfs has been removed from the MNE software.
-
-.. _BABEABAA:
-
-Command-line options
-====================
-
-mne_convert_surface accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fif <*name*>**
-
-    Specifies a fif format input file. The first surface (source space)
-    from this file will be read.
-
-**\---tri <*name*>**
-
-    Specifies a text format input file. The format of this file is described in :ref:`BEHDEFCD`.
-
-**\---meters**
-
-    The unit of measure for the vertex locations in text input files
-    is meters instead of the default millimeters. This option does not
-    have any effect on the interpretation of the FreeSurfer surface
-    files specified with the ``--surf`` option.
-
-**\---swap**
-
-    Swap the ordering of the triangle vertices. The standard convention in
-    the MNE software is to have the vertices in text format files ordered
-    so that the vector cross product of the vectors from vertex 1 to
-    2 and 1 to 3 gives the direction of the outward surface normal. This
-    is also called the counterclockwise ordering. If your text input file
-    does not comply with this right-hand rule, use the ``--swap`` option.
-    This option does not have any effect on the interpretation of the FreeSurfer surface
-    files specified with the ``--surf`` option.
-
-**\---surf <*name*>**
-
-    Specifies a FreeSurfer format
-    input file.
-
-**\---dfs <*name*>**
-
-    Specifies the name of a dfs file to be converted. The surfaces produced
-    by BrainSuite are in the dfs format.
-
-**\---mghmri <*name*>**
-
-    Specifies a mgh/mgz format MRI data file which will be used to define
-    the coordinate transformation to be applied to the data read from
-    a dfs file to bring it to the FreeSurfer MRI
-    coordinates, *i.e.*, the coordinate system of
-    the MRI stack in the file. In addition, this option can be used
-    to insert "volume geometry" information to the FreeSurfer
-    surface file output (``--surfout`` option). If the input file already
-    contains the volume geometry information, --replacegeom is needed
-    to override the input volume geometry and to proceed to writing
-    the data.
-
-**\---replacegeom**
-
-    Replaces existing volume geometry information. Used in conjunction
-    with the ``--mghmri`` option described above.
-
-**\---fifmri <*name*>**
-
-    Specifies a fif format MRI description file which will be used to define
-    the coordinate transformation to be applied to the data read from
-    a dfs file to bring it to the same coordinate system as the MRI stack
-    in the file.
-
-**\---trans <*name*>**
-
-    Specifies the name of a text file which contains the coordinate
-    transformation to be applied to the data read from the dfs file
-    to bring it to the MRI coordinates, see below. This option is rarely
-    needed.
-
-**\---flip**
-
-    By default, the dfs surface nodes are assumed to be in a right-anterior-superior
-    (RAS) coordinate system with its origin at the left-posterior-inferior
-    (LPI) corner of the MRI stack. Sometimes the dfs file has left and
-    right flipped. This option reverses this flip, *i.e.*,
-    assumes the surface coordinate system is left-anterior-superior
-    (LAS) with its origin in the right-posterior-inferior (RPI) corner
-    of the MRI stack.
-
-**\---shift <*value/mm*>**
-
-    Shift the surface vertices to the direction of the surface normals
-    by this amount before saving the surface.
-
-**\---surfout <*name*>**
-
-    Specifies a FreeSurfer format output file.
-
-**\---fifout <*name*>**
-
-    Specifies a fif format output file.
-
-**\---triout <*name*>**
-
-    Specifies an ASCII output file that will contain the surface data
-    in the triangle file format described in :ref:`BEHDEFCD`.
-
-**\---pntout <*name*>**
-
-    Specifies an ASCII output file which will contain the vertex numbers only.
-
-**\---metersout**
-
-    With this option the ASCII output will list the vertex coordinates
-    in meters instead of millimeters.
-
-**\---swapout**
-
-    Defines the vertex ordering of ASCII triangle files to be output.
-    For details, see ``--swap`` option, above.
-
-**\---smfout <*name*>**
-
-    Specifies a smf (Simple Model Format) output file. For details of this
-    format, see http://people.scs.fsu.edu/~burkardt/data/smf.txt.
-
-.. note:: Multiple output options can be specified to produce outputs in several different formats with a single invocation of mne_convert_surface.
-
-The coordinate transformation file specified with the ``--trans`` option
-should contain a 4 x 4 coordinate transformation matrix:
-
-.. math::    T = \begin{bmatrix}
-		R_{11} & R_{12} & R_{13} & x_0 \\
-		R_{21} & R_{22} & R_{23} & y_0 \\
-		R_{31} & R_{32} & R_{33} & z_0 \\
-		0 & 0 & 0 & 1
-		\end{bmatrix}
-
-defined so that if the augmented location vectors in the
-dfs file and MRI coordinate systems are denoted by :math:`r_{dfs} = [x_{dfs} y_{dfs} z_{dfs} 1]^T` and :math:`r_{MRI} = [x_{MRI} y_{MRI} z_{MRI} 1]^T`,
-respectively,
-
-.. math::    r_{MRI} = Tr_{dfs}
-
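-For illustration, a file containing the identity transformation, assuming
-a plain whitespace-separated 4 x 4 layout (the exact layout expected by
-the program is not documented here), would read::
-
-    1 0 0 0
-    0 1 0 0
-    0 0 1 0
-    0 0 0 1
-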
-.. _BABBHHHE:
-
-Converting MRI data into the fif format
-#######################################
-
-The utility mne_make_cor_set creates
-a fif format MRI description
-file, optionally including the MRI data, using FreeSurfer MRI volume
-data as input. The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---dir <*directory*>**
-
-    Specifies a directory containing the MRI volume in COR format. Any
-    previous ``--mgh`` options are cancelled when this option
-    is encountered.
-
-**\---withdata**
-
-    Include the pixel data to the output file. This option is implied
-    with the ``--mgh`` option.
-
-**\---mgh <*name*>**
-
-    An MRI volume file in mgh or mgz format.
-    The ``--withdata`` option is implied with this type of
-    input. Furthermore, the :math:`T_3` transformation,
-    the Talairach transformation :math:`T_4` from
-    the talairach.xfm file referred to in the MRI volume, and the
-    fixed transforms :math:`T_-` and :math:`T_+` will be
-    added to the output file. For the definition of the coordinate transformations,
-    see :ref:`CHDEDFIB`.
-
-**\---talairach <*name*>**
-
-    Take the Talairach transform from this file instead of the one specified
-    in mgh/mgz files.
-
-**\---out <*name*>**
-
-    Specifies the output file, which is a fif-format MRI description
-    file.
-
-.. _BABBIFIJ:
-
-Collecting coordinate transformations into one file
-###################################################
-
-The utility mne_collect_transforms collects
-coordinate transform information from various sources and saves
-it into a single fif file. The coordinate transformations used
-by MNE software are summarized in Figure 5.1. The output
-of mne_collect_transforms may
-include all transforms referred to therein except for the sensor
-coordinate system transformations :math:`T_{s_1} \dotso T_{s_n}`.
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---meas <*name*>**
-
-    Specifies a measurement data file which provides :math:`T_1`.
-    A forward solution or an inverse operator file can also be specified
-    as implied by Table 5.1.
-
-**\---mri <*name*>**
-
-    Specifies an MRI description or a standalone coordinate transformation
-    file produced by mne_analyze which
-    provides :math:`T_2`. If the ``--mgh`` option
-    is not present mne_collect_transforms also
-    tries to find :math:`T_3`, :math:`T_4`, :math:`T_-`,
-    and :math:`T_+` from this file.
-
-**\---mgh <*name*>**
-
-    An MRI volume file in mgh or mgz format.
-    This file provides :math:`T_3`. The transformation :math:`T_4` will
-    be read from the talairach.xfm file referred to in the MRI volume.
-    The fixed transforms :math:`T_-` and :math:`T_+` will
-    also be created.
-
-**\---out <*name*>**
-
-    Specifies the output file. If this option is not present, the collected transformations
-    will be output on screen but not saved.
-
-.. _BEHCHGHD:
-
-Converting an ncov covariance matrix file to fiff
-#################################################
-
-The ncov file format was used to store the noise-covariance
-matrix. The MNE software requires that the covariance matrix
-files are in fif format. The utility mne_convert_ncov converts
-ncov files to fif format.
-
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---ncov <*name*>**
-
-    The ncov file to be converted.
-
-**\---meas <*name*>**
-
-    A fif format measurement file used to assign channel names to the noise-covariance
-    matrix elements. This file should have precisely the same channel
-    order within MEG and EEG as the ncov file. Typically, both the ncov
-    file and the measurement file are created by the now mature off-line
-    averager, meg_average .
-
-.. _BEHCDBHG:
-
-Converting a lisp covariance matrix to fiff
-###########################################
-
-The utility mne_convert_lspcov converts a LISP-format noise-covariance file,
-produced by the Neuromag signal processor graph, into fif format.
-
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---lspcov <*name*>**
-
-    The LISP noise-covariance matrix file to be converted.
-
-**\---meas <*name*>**
-
-    A fif format measurement file used to assign channel names to the noise-covariance
-    matrix elements. This file should have precisely the same channel
-    order within MEG and EEG as the LISP-format covariance matrix file.
-
-**\---out <*name*>**
-
-    The name of a fif format output file. The file name should end with
-    ``-cov.fif``.
-
-**\---outasc <*name*>**
-
-    The name of a text format output file. No information about the channel
-    names is included. The covariance matrix is listed row by row. This
-    file can be loaded into Matlab, for example as shown below.
-
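-A minimal Matlab sketch of reading this text output, with a
-hypothetical file name::
-
-    % Read the text covariance matrix written with --outasc
-    C = load('cov.txt', '-ascii');   % one row of the matrix per line
-    imagesc(C); colorbar             % quick visual sanity check
-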
-.. _BEHCCEBJ:
-
-The MNE data file conversion tool
-#################################
-
-This utility, called mne_convert_mne_data,
-converts various fif files related to the MNE computations
-to other formats. The two principal purposes of this utility are
-to facilitate the development of new analysis approaches with Matlab
-and to convert the forward model and noise covariance matrix
-data into evoked-response type fif files, which can be accessed
-and displayed with the Neuromag source modelling software.
-
-.. note:: Most of the functions of mne_convert_mne_data are now provided by the MNE Matlab toolbox described in :ref:`ch_matlab`. Using the toolbox is recommended to avoid creating additional files occupying disk space.
-
-.. _BEHCICCF:
-
-Command-line options
-====================
-
-The command-line options recognized
-by mne_convert_mne_data are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fwd <*name*>**
-
-    Specifies the name of the forward solution file to be converted. Channels
-    specified with the ``--bad`` option will be excluded from
-    the file.
-
-**\---fixed**
-
-    Convert the forward solution to the fixed-orientation mode before outputting
-    the converted file. With this option only the field patterns corresponding
-    to a dipole aligned with the estimated cortex surface normal are
-    output.
-
-**\---surfsrc**
-
-    When outputting a free-orientation forward model (three orthogonal dipole
-    components present) rotate the dipole coordinate system at each
-    source node so that the two tangential dipole components are output
-    first, followed by the field corresponding to the dipole aligned
-    with the estimated cortex surface normal. The orientation of the
-    first two dipole components in the tangential plane is arbitrarily selected
-    to create an orthogonal coordinate system.
-
-**\---noiseonly**
-
-    When creating a 'measurement' fif file, do not
-    output a forward model file, just the noise-covariance matrix.
-
-**\---senscov <*name*>**
-
-    Specifies the fif file containing a sensor covariance matrix to
-    be included with the output. If no other input files are specified
-    only the covariance matrix is output.
-
-**\---srccov <*name*>**
-
-    Specifies the fif file containing the source covariance matrix to
-    be included with the output. Only diagonal source covariance files
-    can be handled at the moment.
-
-**\---bad <*name*>**
-
-    Specifies the name of the file containing the names of the channels to
-    be omitted, one channel name per line. This does not affect the output
-    of the inverse operator since the channels have been already selected
-    when the file was created.
-
-**\---fif**
-
-    Output the forward model and the noise-covariance matrix into 'measurement' fif
-    files. The forward model files are tagged with <*modalities*> ``-meas-fwd.fif`` and
-    the noise-covariance matrix files with <*modalities*> ``-meas-cov.fif`` .
-    Here, modalities is ``-meg`` if MEG is included, ``-eeg`` if
-    EEG is included, and ``-meg-eeg`` if both types of signals
-    are present. The inclusion of modalities is controlled by the ``--meg`` and ``--eeg`` options.
-
-**\---mat**
-
-    Output the data into MATLAB mat files. This is the default. The
-    output files are tagged with <*modalities*> ``-fwd.mat`` for forward model
-    and noise-covariance matrix output, with ``-inv.mat`` for inverse
-    operator output, and with ``-inv-meas.mat`` for combined inverse
-    operator and measurement data output, respectively. The meaning
-    of <*modalities*> is the same
-    as in the fif output, described above.
-
-**\---tag <*name*>**
-
-    By default, all variables in the matlab output files start with
-    ``mne_``. This option allows you to change the prefix to <*name*>_.
-
-**\---meg**
-
-    Include MEG channels from the forward solution and noise-covariance
-    matrix.
-
-**\---eeg**
-
-    Include EEG channels from the forward solution and noise-covariance
-    matrix.
-
-**\---inv <*name*>**
-
-    Output the inverse operator data from the specified file into a
-    mat file. The source and noise covariance matrices as well as active channels
-    have been previously selected when the inverse operator was created
-    with mne_inverse_operator. Thus
-    the options ``--meg`` , ``--eeg`` , ``--senscov`` , ``--srccov`` , ``--noiseonly`` ,
-    and ``--bad`` do not affect the output of the inverse operator.
-
-**\---meas <*name*>**
-
-    Specifies the file containing measurement data to be output together with
-    the inverse operator. The channels corresponding to the inverse operator
-    are automatically selected from the file if the ``--inv``
-    option is present. Otherwise, the channel selection given with the ``--sel`` option
-    will be taken into account.
-
-**\---set <*number*>**
-
-    Select the data set to be output from the measurement file.
-
-**\---bmin <*value/ms*>**
-
-    Specifies the baseline minimum value setting for the measurement signal
-    output.
-
-**\---bmax <*value/ms*>**
-
-    Specifies the baseline maximum value setting for the measurement signal
-    output.
-
-.. note:: The ``--tmin`` and ``--tmax`` options, which existed in previous versions of mne_convert_mne_data, have been removed. If output of measurement data is requested, the entire averaged epoch is now included.
-
-Guide to combining options
-==========================
-
-The combination of options is quite complicated. :ref:`BEHDCIII` should
-help you determine the combination of options appropriate for your needs.
-
-
-.. tabularcolumns:: |p{0.38\linewidth}|p{0.1\linewidth}|p{0.2\linewidth}|p{0.3\linewidth}|
-.. _BEHDCIII:
-.. table:: Guide to combining mne_convert_mne_data options.
-
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | Desired output                      | Format  | Required options         | Optional options      |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | forward model                       | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       | \---surfsrc           |
-    |                                     |         |   \---meg and/or \---eeg |                       |
-    |                                     |         |   \---fif                |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | forward model                       | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       | \---surfsrc           |
-    |                                     |         |   \---meg and/or \---eeg |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | forward model and sensor covariance | mat     |   \---fwd <*name*>       | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       | \---surfsrc           |
-    |                                     |         |   \---senscov <*name*>   |                       |
-    |                                     |         |   \---meg and/or \---eeg |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | sensor covariance                   | fif     |   \---fwd <*name*>       | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       |                       |
-    |                                     |         |   \---senscov <*name*>   |                       |
-    |                                     |         |   \---noiseonly          |                       |
-    |                                     |         |   \---fif                |                       |
-    |                                     |         |   \---meg and/or \---eeg |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | sensor covariance                   | mat     |   \---senscov <*name*>   | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | sensor covariance eigenvalues       | text    |   \---senscov <*name*>   | \---bad <*name*>      |
-    |                                     |         |   \---out <*name*>       |                       |
-    |                                     |         |   \---eig                |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | evoked MEG/EEG data                 | mat     |   \---meas <*name*>      | \---sel <*name*>      |
-    |                                     |         |   \---out <*name*>       | \---set <*number*>    |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | evoked MEG/EEG data forward model   | mat     |   \---meas <*name*>      | \---bad <*name*>      |
-    |                                     |         |   \---fwd <*name*>       | \---set <*number*>    |
-    |                                     |         |   \---out <*name*>       |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | inverse operator data               | mat     |   \---inv <*name*>       |                       |
-    |                                     |         |   \---out <*name*>       |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-    | inverse operator data evoked        | mat     |   \---inv <*name*>       |                       |
-    | MEG/EEG data                        |         |   \---meas <*name*>      |                       |
-    |                                     |         |   \---out <*name*>       |                       |
-    +-------------------------------------+---------+--------------------------+-----------------------+
-
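-For example, the mat-format forward model conversion listed in the
-table could be invoked as follows (the file names are hypothetical):
-
-``mne_convert_mne_data --fwd sample-fwd.fif --meg --eeg --mat --out sample-fwd.mat``
-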
-Matlab data structures
-======================
-
-The Matlab output provided by mne_convert_mne_data is
-organized in structures, listed in :ref:`BEHCICCA`. The fields
-occurring in these structures are listed in :ref:`BABCBIGF`.
-
-The symbols employed in variable size descriptions are:
-
-**nloc**
-
-    Number of source locations.
-
-**nsource**
-
-    Number of sources. For fixed-orientation sources nsource = nloc,
-    whereas nsource = 3*nloc for free-orientation sources.
-
-**nchan**
-
-    Number of measurement channels.
-
-**ntime**
-
-    Number of time points in the measurement data.
-
-.. _BEHCICCA:
-.. table:: Matlab structures produced by mne_convert_mne_data.
-
-    ===============  =======================================
-    Structure        Contents
-    ===============  =======================================
-    <*tag*>_meas     Measured data
-    <*tag*>_inv      The inverse operator decomposition
-    <*tag*>_fwd      The forward solution
-    <*tag*>_noise    A standalone noise-covariance matrix
-    ===============  =======================================
-
-The prefix given with the ``--tag`` option is indicated by <*tag*>,
-see :ref:`BEHCICCF`. Its default value is ``mne``.
-
-
-.. tabularcolumns:: |p{0.14\linewidth}|p{0.13\linewidth}|p{0.73\linewidth}|
-.. _BABCBIGF:
-.. table:: The fields of Matlab structures.
-
-
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | Variable              | Size            | Description                                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | fwd                   | nsource x nchan | The forward solution, one source on each row. For free     |
-    |                       |                 | orientation sources, the fields of the three orthogonal    |
-    |                       |                 | dipoles for each location are listed consecutively.        |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_names              | nchan (string)  | String array containing the names of the channels included |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_types              | nchan x 2       | The first column lists the channel types (1 = MEG,         |
-    |                       |                 | 2 = EEG). The second column lists the coil types, see      |
-    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
-    |                       |                 | this value equals one.                                     |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
-    |                       |                 | values specify the origin of the sensor coordinate system  |
-    |                       |                 | or the location of the electrode. For MEG channels, the    |
-    |                       |                 | following nine numbers specify the *x*, *y*, and           |
-    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
-    |                       |                 | For EEG electrodes the first unit vector specifies the     |
-    |                       |                 | location of the reference electrode. If the reference is   |
-    |                       |                 | not specified this value is all zeroes. The remaining unit |
-    |                       |                 | vectors are irrelevant for EEG electrodes.                 |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file. The  |
-    |                       |                 | unit of the data is listed in the first column (T = 112,   |
-    |                       |                 | T/m = 201, V = 107). At present, the second column will be |
-    |                       |                 | always zero, *i.e.*, no unit multiplier.                   |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_cals               | nchan x 2       | Even if the data comes from the conversion already         |
-    |                       |                 | calibrated, the original calibration factors are included. |
-    |                       |                 | The first column is the range member of the fif data       |
-    |                       |                 | structures, while the second is the cal member. To get     |
-    |                       |                 | calibrated values in the units given in ch_units from the  |
-    |                       |                 | raw data, the data must be multiplied by the product of    |
-    |                       |                 | range and cal.                                             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | sfreq                 | 1               | The sampling frequency in Hz.                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | highpass              | 1               | Highpass filter frequency (Hz)                             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | source_loc            | nloc x 3        | The source locations given in the coordinate frame         |
-    |                       |                 | indicated by the coord_frame member.                       |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | source_ori            | nsource x 3     | The source orientations                                    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | source_selection      | nsource x 2     | Indication of the sources selected from the complete source|
-    |                       |                 | spaces. Each row contains the number of the source in the  |
-    |                       |                 | complete source space (starting with 0) and the source     |
-    |                       |                 | space number (1 or 2). These numbers refer to the order in |
-    |                       |                 | which the two hemispheres were listed when                 |
-    |                       |                 | mne_make_source_space was invoked. mne_setup_source_space  |
-    |                       |                 | lists the left hemisphere first.                           |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | coord_frame           | string          | Name of the coordinate frame employed in the forward       |
-    |                       |                 | calculations. Possible values are 'head' and 'mri'.        |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | mri_head_trans        | 4 x 4           | The coordinate frame transformation from MRI coordinates to|
-    |                       |                 | the MEG 'head' coordinates.                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
-    |                       |                 | coordinates to the MEG head coordinates                    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | noise_cov             | nchan x nchan   | The noise covariance matrix                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | source_cov            | nsource         | The elements of the diagonal source covariance matrix.     |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | sing                  | nchan           | The singular values of                                     |
-    |                       |                 | :math:`A = C_0^{-1/2} G R^{1/2} = U \Lambda V^T`           |
-    |                       |                 | with :math:`R` selected so that                            |
-    |                       |                 | :math:`\text{trace}(AA^T) / \text{trace}(I) = 1`           |
-    |                       |                 | as discussed in :ref:`CHDDHAGE`                            |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | eigen_fields          | nchan x nchan   | The rows of this matrix are the left singular vectors of   |
-    |                       |                 | :math:`A`, i.e., the columns of :math:`U`, see above.      |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | eigen_leads           | nchan x nsource | The rows of this matrix are the right singular vectors of  |
-    |                       |                 | :math:`A`, i.e., the columns of :math:`V`, see above.      |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | noise_eigenval        | nchan           | In terms of :ref:`CHDDHAGE`, eigenvalues of :math:`C_0`,   |
-    |                       |                 | i.e., not scaled with number of averages.                  |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | noise_eigenvec        | nchan           | Eigenvectors of the noise covariance matrix. In terms of   |
-    |                       |                 | :ref:`CHDDHAGE`, :math:`U_C^T`.                            |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | data                  | nchan x ntime   | The measured data. One row contains the data at one time   |
-    |                       |                 | point.                                                     |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | times                 | ntime           | The time points in the above matrix in seconds             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | nave                  | 1               | Number of averages as listed in the data file.             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | meas_times            | ntime           | The time points in seconds.                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-
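-As an illustration, the following minimal Matlab sketch loads evoked
-data converted with the default ``mne`` tag and plots it using the
-fields listed above (the mat file name is hypothetical; according to
-the ch_cals description, the data are already calibrated)::
-
-    % Load measurement data converted with mne_convert_mne_data --meas
-    s = load('sample-meas.mat');             % hypothetical output file
-    meas = s.mne_meas;                       % default tag gives mne_meas
-    plot(meas.times, meas.data');            % one trace per channel
-    xlabel('Time / s'); ylabel('Amplitude');
-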
-.. _convert_to_matlab:
-
-Converting raw data to Matlab format
-####################################
-
-The utility mne_raw2mat converts
-all or selected channels from a raw data file to a Matlab mat file.
-In addition, this utility can provide information about the raw
-data file so that the raw data can be read directly from the original
-fif file using Matlab file I/O routines.
-
-.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without a need for conversion to mat file format first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_raw2mat, which creates large files occupying disk space unnecessarily.
-
-Command-line options
-====================
-
-mne_raw2mat accepts the
-following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---raw <*name*>**
-
-    Specifies the name of the raw data fif file to convert.
-
-**\---mat <*name*>**
-
-    Specifies the name of the destination Matlab file.
-
-**\---info**
-
-    With this option present, only information about the raw data file
-    is included. The raw data itself is omitted.
-
-**\---sel <*name*>**
-
-    Specifies a text file which contains the names of the channels to include
-    in the output file, one channel name per line. If the ``--info`` option
-    is specified, ``--sel`` does not have any effect.
-
-**\---tag <*tag*>**
-
-    By default, all Matlab variables included in the output file start
-    with ``mne_``. This option changes the prefix to <*tag*>_.
-
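-For example, assuming hypothetical file names, selected channels of a
-raw data file could be converted with
-
-``mne_raw2mat --raw sample_raw.fif --mat sample_raw.mat --sel channel-list.txt``
-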
-Matlab data structures
-======================
-
-The Matlab files output by mne_raw2mat can
-contain two data structures, <*tag*>_raw and <*tag*>_raw_info.
-If the ``--info`` option is specified, the file contains the
-latter structure only.
-
-The <*tag*>_raw structure
-contains only one field, data, which
-is a matrix containing the raw data. Each row of this matrix constitutes
-the data from one channel in the original file. The data type of
-this matrix is the same as that of the original data (2-byte signed integer,
-4-byte signed integer, or single-precision float).
-
-The fields of the <*tag*>_raw_info structure
-are listed in :ref:`BEHFDCIH`. Further explanation of the bufs field
-is provided in :ref:`BEHJEIHJ`.
-
-
-.. tabularcolumns:: |p{0.2\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
-.. _BEHFDCIH:
-.. table:: The fields of the raw data info structure.
-
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | Variable              | Size            | Description                                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | orig_file             | string          | The name of the original fif file specified with the       |
-    |                       |                 | ``--raw`` option.                                          |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | nchan                 | 1               | Number of channels.                                        |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | nsamp                 | 1               | Total number of samples                                    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | bufs                  | nbuf x 4        | This field is present if the ``--info`` option was given on|
-    |                       |                 | the command line. For details, see :ref:`BEHJEIHJ`.        |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | sfreq                 | 1               | The sampling frequency in Hz.                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | highpass              | 1               | Highpass filter frequency (Hz)                             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_names              | nchan (string)  | String array containing the names of the channels included |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_types              | nchan x 2       | The first column lists the channel types (1 = MEG, 2 =     |
-    |                       |                 | EEG). The second column lists the coil types, see          |
-    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
-    |                       |                 | this value equals one.                                     |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
-    |                       |                 | The unit of the data is listed in the first column         |
-    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
-    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
-    |                       |                 | values specify the origin of the sensor coordinate system  |
-    |                       |                 | or the location of the electrode. For MEG channels, the    |
-    |                       |                 | following nine numbers specify the *x*, *y*, and           |
-    |                       |                 | *z*-direction unit vectors of the sensor coordinate system.|
-    |                       |                 | For EEG electrodes the first vector after the electrode    |
-    |                       |                 | location specifies the location of the reference electrode.|
-    |                       |                 | If the reference is not specified this value is all zeroes.|
-    |                       |                 | The remaining unit vectors are irrelevant for EEG          |
-    |                       |                 | electrodes.                                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_cals               | nchan x 2       | The raw data output by mne_raw2mat is uncalibrated.        |
-    |                       |                 | The first column is the range member of the fiff data      |
-    |                       |                 | structures, while the second is the cal member. To get     |
-    |                       |                 | calibrated data values in the units given in ch_units from |
-    |                       |                 | the raw data, the data must be multiplied by the product   |
-    |                       |                 | of range and cal.                                          |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
-    |                       |                 | coordinates to the MEG head coordinates.                   |
-    +-----------------------+-----------------+------------------------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.1\linewidth}|p{0.6\linewidth}|
-.. _BEHJEIHJ:
-.. table:: The bufs member of the raw data info structure.
-
-    +-----------------------+-------------------------------------------------------------------------+
-    | Column                | Contents                                                                |
-    +-----------------------+-------------------------------------------------------------------------+
-    | 1                     | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte signed   |
-    |                       | integer, 4 = single-precision float). All data in the fif file are      |
-    |                       | written in the big-endian byte order. The raw data are stored sample by |
-    |                       | sample.                                                                 |
-    +-----------------------+-------------------------------------------------------------------------+
-    | 2                     | Byte location of this buffer in the original fif file.                  |
-    +-----------------------+-------------------------------------------------------------------------+
-    | 3                     | First sample of this buffer. Since raw data storing can be switched on  |
-    |                       | and off during the acquisition, there might be gaps between the end of  |
-    |                       | one buffer and the beginning of the next.                               |
-    +-----------------------+-------------------------------------------------------------------------+
-    | 4                     | Number of samples in the buffer.                                        |
-    +-----------------------+-------------------------------------------------------------------------+
-
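-As a sketch of how this information can be used, the following Matlab
-fragment reads the first raw data buffer directly from the original
-fif file and calibrates it. The file names are hypothetical, the info
-structure is assumed to have been saved with the default ``mne`` tag,
-and the buffer is assumed to contain single-precision floats (type 4)::
-
-    % Load the description saved with mne_raw2mat --info
-    s = load('sample_raw_info.mat');
-    info = s.mne_raw_info;
-    buf = info.bufs(1,:);                         % [type offset firstsamp nsamp]
-    fid = fopen(info.orig_file, 'r', 'ieee-be');  % fif data are big-endian
-    fseek(fid, buf(2), 'bof');                    % jump to the buffer start
-    raw = fread(fid, [info.nchan, buf(4)], 'single');  % stored sample by sample
-    fclose(fid);
-    cal = info.ch_cals(:,1) .* info.ch_cals(:,2); % range * cal per channel
-    data = bsxfun(@times, cal, raw);              % calibrated data in ch_units
-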
-.. _BEHFIDCB:
-
-Converting epochs to Matlab format
-##################################
-
-The utility mne_epochs2mat converts
-epoch data including all or selected channels from a raw data file
-to a simple binary file with an associated description file in Matlab
-mat file format. With the help of the description file, a Matlab program
-can easily read the epoch data from the simple binary file. Signal-space
-projection and bandpass filtering can be optionally applied
-to the raw data prior to saving the epochs.
-
-.. note:: The MNE Matlab toolbox described in :ref:`ch_matlab` provides direct access to raw fif files without conversion with mne_epochs2mat first. Therefore, it is recommended that you use the Matlab toolbox rather than mne_epochs2mat, which creates large files occupying disk space unnecessarily. An exception to this is the case where you apply a filter to the data and save the band-pass filtered epochs.
-
-Command-line options
-====================
-
-mne_epochs2mat accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---raw <*name*>**
-
-    Specifies the name of the raw data fif file to use as input.
-
-**\---mat <*name*>**
-
-    Specifies the name of the destination file. Anything following the last
-    period in the file name will be removed before composing the output
-    file name. The binary epoch file will be called <*trimmed name*> ``.epochs`` and
-    the corresponding Matlab description file will be <*trimmed name*> ``_desc.mat``.
-
-**\---tag <*tag*>**
-
-    By default, all Matlab variables included in the description file
-    start with ``mne_``. This option changes the prefix to <*tag*>_.
-
-**\---events <*name*>**
-
-    The file containing the event definitions. This can be a text or
-    fif format file produced by mne_process_raw or mne_browse_raw ,
-    see :ref:`CACBCEGC`. With the help of this file it is possible
-    to select virtually any data segment from the raw data file. If
-    this option is missing, the digital trigger channel in the raw data
-    file or a fif format event file produced automatically by mne_process_raw or mne_browse_raw is
-    consulted for event information.
-
-**\---event <*number*>**
-
-    Event number identifying the epochs of interest.
-
-**\---tmin <*time/ms*>**
-
-    The starting point of the epoch with respect to the event of interest.
-
-**\---tmax <*time/ms*>**
-
-    The endpoint of the epoch with respect to the event of interest.
-
-**\---sel <*name*>**
-
-    Specifies a text file which contains the names of the channels to include
-    in the output file, one channel name per line. If the ``--inv`` option
-    is specified, ``--sel`` is ignored. If neither ``--inv`` nor ``--sel`` is
-    present, all MEG and EEG channels are included. The digital trigger
-    channel can be included with the ``--includetrig`` option, described
-    below.
-
-**\---inv <*name*>**
-
-    Specifies an inverse operator, which will be employed in two ways. First,
-    the channels included in the output will be those included in the inverse
-    operator. Second, any signal-space projection operator present in
-    the inverse operator file will be applied to the data. This option
-    cancels the effect of ``--sel`` and ``--proj`` options.
-
-**\---digtrig <*name*>**
-
-    Name of the composite digital trigger channel. The default value
-    is 'STI 014'. Underscores in the channel name
-    will be replaced by spaces.
-
-**\---digtrigmask <*number*>**
-
-    Mask to be applied to the trigger channel values before considering them.
-    This option is useful if one wants to set some bits in a don't care
-    state. For example, some finger response pads keep the trigger lines
-    high if not in use, *i.e.*, a finger is not in
-    place. Yet, it is convenient to keep these devices permanently connected
-    to the acquisition system. The number can be given in decimal or
-    hexadecimal format (beginning with 0x or 0X). For example, the value
-    255 (0xFF) means that only the lowest order byte (usually trigger
-    lines 1 - 8 or bits 0 - 7) will be considered.
-
-**\---includetrig**
-
-    Add the digital trigger channel to the list of channels to output.
-    This option should not be used if the trigger channel is already
-    included in the selection specified with the ``--sel`` option.
-
-**\---filtersize <*size*>**
-
-    Adjust the length of the FFT to be applied in filtering. The number will
-    be rounded up to the next power of two. If the size is :math:`N`,
-    the corresponding length of time is :math:`^N/_{f_s}`,
-    where :math:`f_s` is the sampling frequency
-    of your data. The filtering procedure includes overlapping tapers
-    of length :math:`^N/_2` so that the total FFT
-    length will actually be :math:`2N`. The default
-    value is 4096.
-
-**\---highpass <*value/Hz*>**
-
-    Highpass filter frequency limit. If this is too low with respect
-    to the selected FFT length and data file sampling frequency, the
-    data will not be highpass filtered. You can experiment with the
-    interactive version to find the lowest applicable filter for your
-    data. This value can be adjusted in the interactive version of the
-    program. The default is 0, i.e., no highpass filter in effect.
-
-**\---highpassw <*value/Hz*>**
-
-    The width of the transition band of the highpass filter. The default
-    is 6 frequency bins, where one bin is :math:`^{f_s}/_{(2N)}`.
-
-**\---lowpass <*value/Hz*>**
-
-    Lowpass filter frequency limit. This value can be adjusted in the interactive
-    version of the program. The default is 40 Hz.
-
-**\---lowpassw <*value/Hz*>**
-
-    The width of the transition band of the lowpass filter. This value
-    can be adjusted in the interactive version of the program. The default
-    is 5 Hz.
-
-**\---filteroff**
-
-    Do not filter the data.
-
-**\---proj <*name*>**
-
-    Include signal-space projection (SSP) information from this file.
-    If the ``--inv`` option is present, ``--proj`` has
-    no effect.
-
-.. note:: Baseline has not been subtracted from the epochs. This has to be done in subsequent processing with Matlab if so desired.
-
-.. note:: Strictly speaking, trigger mask value zero would mean that all trigger inputs are ignored. However, for convenience, setting the mask to zero or not setting it at all has the same effect as 0xFFFFFFFF, *i.e.*, all bits set.
-
-.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_browse_raw or mne_process_raw. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
-
-.. note:: The digital trigger channel mask can also be set with the MNE_TRIGGER_CH_MASK environment variable. Using the ``--digtrigmask`` option supersedes the MNE_TRIGGER_CH_MASK environment variable.
-
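-For example, assuming hypothetical file names, epochs around event 1
-could be extracted with
-
-``mne_epochs2mat --raw sample_raw.fif --mat sample_epochs --event 1 --tmin -100 --tmax 300``
-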
-The binary epoch data file
-==========================
-
-mne_epochs2mat saves the
-epoch data extracted from the raw data file in a simple binary file.
-The data are stored as big-endian single-precision floating point
-numbers. Assuming that each of the total of :math:`p` epochs
-contains :math:`n` channels and :math:`m` time
-points, the data :math:`s_{jkl}` are ordered
-as
-
-.. math::    s_{111} \dotso s_{1n1} s_{211} \dotso s_{mn1} \dotso s_{mnp}\ ,
-
-where the first index stands for the time point, the second
-for the channel, and the third for the epoch number, respectively.
-The data are not calibrated, i.e., the calibration factors present
-in the Matlab description file have to be applied to get to physical
-units as described below.
-
-.. note:: The maximum size of an epoch data file is 2 Gbytes, *i.e.*, 0.5 Gsamples.
-
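-To illustrate this ordering, the following minimal Matlab sketch
-reads and calibrates the whole binary epoch file, assuming
-single-precision data, equally long epochs, hypothetical file names,
-and the default ``mne`` variable tag; the fields used here belong to
-the description structure documented in the next section::
-
-    % Load the description file written by mne_epochs2mat
-    s = load('sample_epochs_desc.mat');
-    info = s.mne_epoch_info;
-    m = info.epochs(1,5);                         % samples per epoch
-    fid = fopen(info.epoch_file, 'r', 'ieee-be'); % big-endian floats
-    raw = fread(fid, [info.nchan, m * info.nepoch], 'single');
-    fclose(fid);
-    % channel index varies fastest, then time, then epoch
-    data = reshape(raw, [info.nchan, m, info.nepoch]);
-    cal = info.ch_cals(:,1) .* info.ch_cals(:,2); % range * cal per channel
-    data = bsxfun(@times, cal, data);             % calibrated epochs
-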
-Matlab data structures
-======================
-
-The Matlab description files output by mne_epochs2mat contain
-a data structure <*tag*>_epoch_info.
-The fields of this structure are listed in :ref:`BEHIFJIJ`.
-Further explanation of the epochs member
-is provided in :ref:`BEHHAGHE`.
-
-
-.. tabularcolumns:: |p{0.15\linewidth}|p{0.15\linewidth}|p{0.6\linewidth}|
-.. _BEHIFJIJ:
-.. table:: The fields of the epoch data description structure.
-
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | Variable              | Size            | Description                                                |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | orig_file             | string          | The name of the original fif file specified with the       |
-    |                       |                 | ``--raw`` option.                                          |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | epoch_file            | string          | The name of the epoch data file produced by mne_epochs2mat.|
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | nchan                 | 1               | Number of channels.                                        |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | nepoch                | 1               | Total number of epochs.                                    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | epochs                | nepoch x 5      | Description of the content of the epoch data file,         |
-    |                       |                 | see :ref:`BEHHAGHE`.                                       |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | sfreq                 | 1               | The sampling frequency in Hz.                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | lowpass               | 1               | Lowpass filter frequency (Hz)                              |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | highpass              | 1               | Highpass filter frequency (Hz)                             |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_names              | nchan (string)  | String array containing the names of the channels included |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_types              | nchan x 2       | The first column lists the channel types (1 = MEG, 2 =     |
-    |                       |                 | EEG). The second column lists the coil types, see          |
-    |                       |                 | :ref:`BGBBHGEC` and :ref:`CHDBDFJE`. For EEG electrodes,   |
-    |                       |                 | this value equals one.                                     |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_lognos             | nchan x 1       | Logical channel numbers as listed in the fiff file         |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_units              | nchan x 2       | Units and unit multipliers as listed in the fif file.      |
-    |                       |                 | The unit of the data is listed in the first column         |
-    |                       |                 | (T = 112, T/m = 201, V = 107). At present, the second      |
-    |                       |                 | column will be always zero, *i.e.*, no unit multiplier.    |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_pos                | nchan x 12      | The location information for each channel. The first three |
-    |                       |                 | values specify the origin of the sensor coordinate system  |
-    |                       |                 | or the location of the electrode. For MEG channels, the    |
-    |                       |                 | following nine numbers specify the *x*, *y*, and           |
-    |                       |                 | *z*-direction unit vectors of the sensor coordinate        |
-    |                       |                 | system. For EEG electrodes the first vector after the      |
-    |                       |                 | electrode location specifies the location of the reference |
-    |                       |                 | electrode. If the reference is not specified this value is |
-    |                       |                 | all zeroes. The remaining unit vectors are irrelevant for  |
-    |                       |                 | EEG electrodes.                                            |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | ch_cals               | nchan x 2       | The epoch data output by mne_epochs2mat are not calibrated.|
-    |                       |                 | The first column is the range member of the fiff data      |
-    |                       |                 | structures, while the second is the cal member. To         |
-    |                       |                 | get calibrated data values in the units given in           |
-    |                       |                 | ch_units from the raw data, the data must be multiplied    |
-    |                       |                 | by the product of range and cal.                           |
-    +-----------------------+-----------------+------------------------------------------------------------+
-    | meg_head_trans        | 4 x 4           | The coordinate frame transformation from the MEG device    |
-    |                       |                 | coordinates to the MEG head coordinates.                   |
-    +-----------------------+-----------------+------------------------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
-.. _BEHHAGHE:
-.. table:: The epochs member of the raw data info structure.
-
-    +---------------+------------------------------------------------------------------+
-    | Column        | Contents                                                         |
-    +---------------+------------------------------------------------------------------+
-    | 1             | The raw data type (2 or 16 = 2-byte signed integer, 3 = 4-byte   |
-    |               | signed integer, 4 = single-precision float). The epoch data are  |
-    |               | written using the big-endian byte order. The data are stored     |
-    |               | sample by sample.                                                |
-    +---------------+------------------------------------------------------------------+
-    | 2             | Byte location of this epoch in the binary epoch file.            |
-    +---------------+------------------------------------------------------------------+
-    | 3             | First sample of this epoch in the original raw data file.        |
-    +---------------+------------------------------------------------------------------+
-    | 4             | First sample of the epoch with respect to the event.             |
-    +---------------+------------------------------------------------------------------+
-    | 5             | Number of samples in the epoch.                                  |
-    +---------------+------------------------------------------------------------------+
-
-.. note:: For source modelling purposes, it is recommended that the MNE Matlab toolbox (see :ref:`ch_matlab`) be employed to read the measurement info instead of using the channel information in the raw data info structure described in :ref:`BEHIFJIJ`.
diff --git a/doc/source/manual/cookbook.rst b/doc/source/manual/cookbook.rst
deleted file mode 100644
index 7613c71..0000000
--- a/doc/source/manual/cookbook.rst
+++ /dev/null
@@ -1,1066 +0,0 @@
-
-
-.. _ch_cookbook:
-
-============
-The Cookbook
-============
-
-Overview
-########
-
-This section describes the typical workflow needed to produce
-minimum-norm estimate movies using the MNE software. The workflow
-is summarized in :ref:`CIHBIIAH`.
-
-.. _CIHBIIAH:
-
-.. figure:: pics/Flowchart.png
-    :alt: MNE Workflow Flowchart
-    :align: center
-
-    Workflow of the MNE software
-    
-    References in parentheses indicate sections and chapters of this manual.
-
-Selecting the subject
-#####################
-
-Before starting the data analysis, set up the environment
-variable SUBJECTS_DIR to select the directory under which the anatomical
-MRI data are stored. Optionally, set SUBJECT as the name of the
-subject's MRI data directory under SUBJECTS_DIR. With this
-setting you can avoid entering the ``--subject`` option common to many
-MNE programs and scripts. In the following sections, files in the
-FreeSurfer directory hierarchy are usually referred to without specifying
-the leading directories. Thus, bem/msh-7-src.fif is used to refer
-to the file $SUBJECTS_DIR/$SUBJECT/bem/msh-7-src.fif.
-
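-For example, in a Bourne-compatible shell (the path shown is
-hypothetical):
-
-``export SUBJECTS_DIR=/data/subjects``
-
-``export SUBJECT=duck_donald``
-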
-It is also recommended that the FreeSurfer environment
-is set up before using the MNE software.
-
-.. _CHDBBCEJ:
-
-Cortical surface reconstruction with FreeSurfer
-###############################################
-
-The first processing stage is the creation of various surface
-reconstructions with FreeSurfer.
-The recommended FreeSurfer workflow
-is summarized on the FreeSurfer wiki pages: https://surfer.nmr.mgh.harvard.edu/fswiki/RecommendedReconstruction.
-Please refer to the FreeSurfer wiki pages
-(https://surfer.nmr.mgh.harvard.edu/fswiki/) and other FreeSurfer documentation
-for more information.
-
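-For example, the complete reconstruction for one subject is typically
-started with a FreeSurfer command of the form
-
-``recon-all -all -subjid duck_donald``
-
-but please consult the FreeSurfer documentation for the invocation
-appropriate for your data.
-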
-.. note:: Only the latest (4.0.X and later) FreeSurfer distributions contain a version of tkmedit which is compatible with mne_analyze, see :ref:`CACCHCBF`.
-
-.. _BABCCEHF:
-
-Setting up the anatomical MR images for MRIlab
-##############################################
-
-If you have the Neuromag software installed, the Neuromag
-MRI viewer, MRIlab, can be used to access the MRI slice data created
-by FreeSurfer. In addition, the
-Neuromag MRI directories can be used for storing the MEG/MRI coordinate
-transformations created with mne_analyze,
-see :ref:`CACEHGCD`. During the computation of the forward
-solution, mne_do_forward_solution searches
-for the MEG/MRI coordinate transformation in the Neuromag MRI directories,
-see :ref:`BABCHEJD`. The fif files created by mne_setup_mri can
-be loaded into Matlab with the fiff_read_mri function,
-see :ref:`ch_matlab`.
-
-These functions require running the script mne_setup_mri, which
-requires that the subject be set with the ``--subject`` option
-or by the SUBJECT environment variable. The script processes one
-or more MRI data sets from ``$SUBJECTS_DIR/$SUBJECT/mri``,
-by default T1 and brain. This default can be changed by
-specifying the sets with one or more ``--mri`` options.
-
-The script creates the directories ``mri/`` <*name*> ``-neuromag/slices`` and ``mri/`` <*name*> ``-neuromag/sets`` .
-If the input data set is in COR format, mne_setup_mri makes
-symbolic links from the COR files in the directory ``mri/`` <*name*> into ``mri/`` <*name*> ``-neuromag/slices`` ,
-and creates a corresponding fif file COR.fif in ``mri/`` <*name*> ``-neuromag/sets``.
-This "description file" contains references to
-the actual MRI slices.
-
-If the input MRI data are stored in the newer mgz format,
-the file created in the ``mri/`` <*name*> ``-neuromag/sets`` directory
-will include the MRI pixel data as well. If available, the coordinate
-transformations to allow conversion between the MRI (surface RAS)
-coordinates and MNI and FreeSurfer Talairach coordinates are copied
-to the MRI description file. mne_setup_mri invokes mne_make_cor_set ,
-described in :ref:`BABBHHHE` to convert the data.
-
-For example:
-
-``mne_setup_mri --subject duck_donald --mri T1``
-
-This command processes the MRI data set T1 for subject duck_donald.
-
-.. note:: If the SUBJECT environment variable is set, it is usually sufficient to run mne_setup_mri without any options.
-
-.. note:: If the name specified with the ``--mri`` option contains a slash, the MRI data are accessed from the directory specified and the ``SUBJECT`` and ``SUBJECTS_DIR`` environment variables as well as the ``--subject`` option are ignored.
-
-.. _CIHCHDAE:
-
-Setting up the source space
-###########################
-
-This stage consists of the following:
-
-- Creating a suitable decimated dipole
-  grid on the white matter surface.
-
-- Creating the source space file in fif format.
-
-- Creating ascii versions of the source space file for viewing
-  with MRIlab.
-
-All of the above is accomplished with the convenience script mne_setup_source_space. This
-script assumes that:
-
-- The anatomical MRI processing has been
-  completed as described in :ref:`CHDBBCEJ`.
-
-- The environment variable SUBJECTS_DIR is set correctly.
-
-The script accepts the following options:
-
-**\---subject <*subject*>**
-
-    Defines the name of the subject. If the environment variable SUBJECT
-    is set correctly, this option is not required.
-
-**\---morph <*name*>**
-
-    Name of a subject in SUBJECTS_DIR. If this option is present, the source
-    space will be first constructed for the subject defined by the --subject
-    option or the SUBJECT environment variable and then morphed to this
-    subject. This option is useful if you want to create source spaces
-    for several subjects and want to directly compare the data across
-    subjects at the source space vertices without any morphing procedure
-    afterwards. The drawback of this approach is that the spacing between
-    source locations in the "morph" subject is not going
-    to be as uniform as it would be without morphing.
-
-**\---spacing <*spacing/mm*>**
-
-    Specifies the grid spacing for the source space in mm. If not set,
-    a default spacing of 7 mm is used. Either the default or a 5-mm
-    spacing is recommended.
-
-**\---ico <*number*>**
-
-    Instead of using the traditional method for cortical surface decimation
-    it is possible to create the source space using the topology of
-    a recursively subdivided icosahedron (<*number*> > 0)
-    or an octahedron (<*number*> < 0).
-    This method uses the cortical surface inflated to a sphere as a
-    tool to find the appropriate vertices for the source space. The
-    benefit of the ``--ico`` option is that the source space
-    will have triangulation information for the decimated vertices included, which
-    future versions of MNE software may be able to utilize. The number
-    of triangles increases by a factor of four in each subdivision,
-    starting from 20 triangles in an icosahedron and 8 triangles in an
-    octahedron. Since the number of vertices on a closed surface is :math:`n_{vert} = (n_{tri} + 4)/2`,
-    the number of vertices in the *k* th subdivision of
-    an icosahedron and an octahedron are :math:`10 \cdot 4^k + 2` and :math:`4^{k + 1} + 2`, respectively.
-    The recommended values for <*number*> and
-    the corresponding number of source space locations are listed in :ref:`BABGCDHA`.
-
-**\---surface <*name*>**
-
-    Name of the surface under the surf directory to be used. Defaults
-    to 'white'. ``mne_setup_source_space`` looks
-    for files ``rh.`` <*name*> and ``lh.`` <*name*> under
-    the ``surf`` directory.
-
-**\---overwrite**
-
-    An existing source space file with the same name is overwritten only
-    if this option is specified.
-
-**\---cps**
-
-    Compute the cortical patch statistics. This is needed if current-density estimates
-    are computed, see :ref:`CBBDBHDI`. If the patch information is
-    available in the source space file the surface normal is considered to
-    be the average normal calculated over the patch instead of the normal
-    at each source space location. The calculation of this information
-    takes a considerable amount of time because of the large number
-    of Dijkstra searches involved.
-
-.. _BABGCDHA:
-
-.. table:: Recommended subdivisions of an icosahedron and an octahedron for the creation of source spaces. The approximate source spacing and corresponding surface area have been calculated assuming a 1000-cm2 surface area per hemisphere.
-
-    ==========  ========================  =====================  ===============================
-    <*number*>  Sources per hemisphere    Source spacing / mm    Surface area per source / mm2
-    ==========  ========================  =====================  ===============================
-    -5          1026                      9.9                    97
-    4           2562                      6.2                    39
-    -6          4098                      4.9                    24
-    5           10242                     3.1                    9.8
-    ==========  ========================  =====================  ===============================
-
-For example, to create the reconstruction geometry for Donald
-Duck with a 5-mm spacing between the grid points, say
-
-``mne_setup_source_space --subject duck_donald --spacing 5``
-
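-Correspondingly, to create a source space based on a recursively
-subdivided octahedron with 4098 source locations per hemisphere (see
-:ref:`BABGCDHA`), say
-
-``mne_setup_source_space --subject duck_donald --ico -6``
-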
-As a result, the following files are created in the ``bem`` directory:
-
-- <*subject*>-<*spacing*>- ``src.fif`` containing
-  the source space description in fif format.
-
-- <*subject*>-<*spacing*>- ``lh.pnt`` and <*subject*>-<*spacing*>- ``rh.pnt`` containing
-  the source space points in MRIlab compatible ascii format.
-
-- <*subject*>-<*spacing*>- ``lh.dip`` and <*subject*>-<*spacing*>- ``rh.dip`` containing
-  the source space points in MRIlab compatible ascii format. These
-  files contain 'dipoles', *i.e.*,
-  both source space points and cortex normal directions.
-
-- If cortical patch statistics are requested, another source
-  space file called <*subject*>-<*spacing*> ``p-src.fif`` will
-  be created.
-
-.. note:: <*spacing*> will be the suggested source spacing in millimeters if the ``--spacing`` option is used. For source spaces based on the *k* th subdivision of an icosahedron, <*spacing*> will be replaced by ``ico-`` k or ``oct-`` k, respectively.
-
-.. note:: After the geometry is set up it is possible to check that the source space points are located on the cortical surface. This can easily be done by loading the ``COR.fif`` file from ``mri/T1-neuromag/sets`` into MRIlab and by subsequently overlaying the corresponding pnt or dip files using Import/Strings or Import/Dipoles from the File menu, respectively.
-
-.. note:: If the SUBJECT environment variable is set correctly it is usually sufficient to run ``mne_setup_source_space`` without any options.
-
-.. _CHDBJCIA:
-
-Creating the BEM model meshes
-#############################
-
-Calculation of the forward solution using the boundary-element
-model (BEM) requires that the surfaces separating regions of different
-electrical conductivities are tessellated with suitable surface
-elements. Our BEM software employs triangular tessellations. Therefore,
-prerequisites for BEM calculations are the segmentation of the MRI
-data and the triangulation of the relevant surfaces.
-
-For MEG computations, a reasonably accurate solution can
-be obtained by using a single-compartment BEM assuming the shape
-of the intracranial volume. For EEG, the standard model contains
-the intracranial space, the skull, and the scalp.
-
-At present, no bulletproof method exists for creating the
-triangulations. Feasible approaches are described in :ref:`create_bem_model`.
-
-.. _BABDBBFC:
-
-Setting up the triangulation files
-==================================
-
-The segmentation algorithms described in :ref:`create_bem_model` produce
-either FreeSurfer surfaces or triangulation
-data in text format. Before proceeding to the creation of the boundary
-element model, standard files (or symbolic links created with the ``ln -s`` command) have to be present in the subject's ``bem`` directory.
-If you are employing ASCII triangle files the standard file names
-are:
-
-**inner_skull.tri**
-
-    Contains the inner skull triangulation.
-
-**outer_skull.tri**
-
-    Contains the outer skull triangulation.
-
-**outer_skin.tri**
-
-    Contains the head surface triangulation.
-
-The corresponding names for FreeSurfer surfaces
-are:
-
-**inner_skull.surf**
-
-    Contains the inner skull triangulation.
-
-**outer_skull.surf**
-
-    Contains the outer skull triangulation.
-
-**outer_skin.surf**
-
-    Contains the head surface triangulation.
-
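-For example, if the watershed algorithm is used for the head surface,
-the standard file can be a symbolic link to the watershed output (the
-subject name and the watershed file name below are only illustrative):
-
-``cd $SUBJECTS_DIR/duck_donald/bem``
-
-``ln -s watershed/duck_donald_outer_skin_surface outer_skin.surf``
-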
-.. note:: Different methods can be employed for the creation of the individual surfaces. For example, it may turn out that the watershed algorithm produces a better quality skin surface than the segmentation approach based on the FLASH images. If this is the case, ``outer_skin.surf`` can be set to point to the corresponding watershed output file while the other surfaces can be picked from the FLASH segmentation data.
-
-.. note:: The triangulation files can include the name of the subject as a prefix ``<*subject name*>-``, *e.g.*, ``duck-inner_skull.surf``.
-
-.. note:: The mne_convert_surface utility described in :ref:`BEHDIAJG` can be used to convert text format triangulation files into the FreeSurfer surface format.
-
-.. note:: "Aliases" created with the Mac OSX finder are not equivalent to symbolic links and do not work as such for the UNIX shells and MNE programs.
-
-.. _CIHDBFEG:
-
-Setting up the boundary-element model
-#####################################
-
-This stage sets up the subject-dependent data for computing
-the forward solutions:
-
-- The fif format boundary-element model
-  geometry file is created. This step also checks that the input surfaces
-  are complete and that they are topologically correct, *i.e.*,
-  that the surfaces do not intersect and that the surfaces are correctly
-  ordered (outer skull surface inside the scalp and inner skull surface
-  inside the outer skull). Furthermore, the range of triangle sizes
-  on each surface is reported. For the three-layer model, the minimum
-  distance between the surfaces is also computed.
-
-- Text files containing the boundary surface vertex coordinates are
-  created.
-
-- The geometry-dependent BEM solution data are computed. This step,
-  which takes several minutes to complete, can optionally be omitted.
-
-This step assigns the conductivity values to the BEM compartments.
-For the scalp and the brain compartments, the default is 0.3 S/m.
-The default skull conductivity is 50 times smaller, *i.e.*,
-0.006 S/m. Recent publications, see :ref:`CEGEGDEI`, report
-a range of skull conductivity ratios ranging from 1:15 (Oostendorp *et
-al.*, 2000) to 1:25 - 1:50 (Lew *et al.*,
-2009, Gonçalves *et al.*, 2003). The
-MNE default ratio 1:50 is based on the typical values reported in
-(Gonçalves *et al.*, 2003), since their
-approach is based on a comparison of SEF/SEP measurements in a BEM model.
-The variability across publications may depend on individual variations
-but, more importantly, on the precision of the skull compartment
-segmentation.
-
-This processing stage is automated with the script mne_setup_forward_model. This
-script assumes that:
-
-- The anatomical MRI processing has been
-  completed as described in :ref:`CHDBBCEJ`.
-
-- The BEM model meshes have been created as outlined in :ref:`CHDBJCIA`.
-
-- The environment variable SUBJECTS_DIR is set correctly.
-
-mne_setup_forward_model accepts
-the following options:
-
-**\---subject <*subject*>**
-
-    Defines the name of the subject. This can also be accomplished
-    by setting the SUBJECT environment variable.
-
-**\---surf**
-
-    Use the FreeSurfer surface files instead of the default ASCII triangulation
-    files. Please consult :ref:`BABDBBFC` for the standard file
-    naming scheme.
-
-**\---noswap**
-
-    Traditionally, the vertices of the triangles in 'tri' files
-    have been ordered so that, seen from the outside of the triangulation,
-    the vertices are ordered in clockwise fashion. The fif files, however,
-    employ the more standard convention with the vertices ordered counterclockwise.
-    Therefore, mne_setup_forward_model by
-    default reverses the vertex ordering before writing the fif file.
-    If, for some reason, you have counterclockwise-ordered tri files
-    available this behavior can be turned off by defining ``--noswap`` .
-    When the fif file is created, the vertex ordering is checked and
-    the process is aborted if it is incorrect after taking into account
-    the state of the swapping. Should this happen, try to run mne_setup_forward_model again including
-    the ``--noswap`` flag. In particular, if you employ the seglab software
-    to create the triangulations (see :ref:`create_bem_model`), the ``--noswap`` flag
-    is required. This option is ignored if ``--surf`` is specified.
-
-**\---ico <*number*>**
-
-    This option is relevant (and required) only with the ``--surf`` option and
-    if the surface files have been produced by the watershed algorithm.
-    The watershed triangulations are isomorphic with an icosahedron,
-    which has been recursively subdivided six times to yield 20480 triangles.
-    However, this number of triangles results in a long computation
-    time even in a workstation with generous amounts of memory. Therefore,
-    the triangulations have to be decimated. Specifying ``--ico 4`` yields 5120 triangles per surface while ``--ico 3`` results
-    in 1280 triangles. The recommended choice is ``--ico 4`` .
-
-**\---homog**
-
-    Use a single-compartment model (brain only) instead of a three-layer one
-    (scalp, skull, and brain). Only the ``inner_skull.tri`` triangulation
-    is required. This model is usually sufficient for MEG but invalid
-    for EEG. If you are employing MEG data only, this option is recommended
-    because of faster computation times. If this flag is specified,
-    the options ``--brainc`` , ``--skullc`` , and ``--scalpc`` are irrelevant.
-
-**\---brainc <*conductivity/ S/m*>**
-
-    Defines the brain compartment conductivity. The default value is 0.3 S/m.
-
-**\---skullc <*conductivity/ S/m*>**
-
-    Defines the skull compartment conductivity. The default value is 0.006 S/m
-    corresponding to a conductivity ratio 1/50 between the brain and
-    skull compartments.
-
-**\---scalpc <*conductivity/ S/m*>**
-
-    Defines the scalp compartment conductivity. The default value is 0.3 S/m.
-
-**\---innershift <*value/mm*>**
-
-    Shift the inner skull surface outwards along the vertex normal directions
-    by this amount.
-
-**\---outershift <*value/mm*>**
-
-    Shift the outer skull surface outwards along the vertex normal directions
-    by this amount.
-
-**\---scalpshift <*value/mm*>**
-
-    Shift the scalp surface outwards along the vertex normal directions by
-    this amount.
-
-**\---nosol**
-
-    Omit the BEM model geometry dependent data preparation step. This
-    can be done later by running mne_setup_forward_model without the ``--nosol`` option.
-
-**\---model <*name*>**
-
-    Name for the BEM model geometry file. The model will be created in
-    the directory bem as <*name*>- ``bem.fif`` . If
-    this option is missing, standard model names will be used (see below).
-
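-For example, a three-layer model for the example subject, using
-FreeSurfer format surfaces decimated to 5120 triangles each, could be
-set up with
-
-``mne_setup_forward_model --subject duck_donald --surf --ico 4``
-
-while a single-compartment model, sufficient for MEG-only analysis,
-could be created with
-
-``mne_setup_forward_model --subject duck_donald --homog --surf --ico 4``
-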
-As a result of running the mne_setup_forward_model script, the
-following files are created in the ``bem`` directory:
-
-- BEM model geometry specifications <*subject*>-<*ntri-scalp*>-<*ntri-outer_skull*>-<*ntri-inner_skull*>- ``bem.fif`` or <*subject*>-<*ntri-inner_skull*> ``-bem.fif`` containing
-  the BEM geometry in fif format. The latter file is created if ``--homog``
-  option is specified. Here, <*ntri-xxx*> indicates
-  the number of triangles on the corresponding surface.
-
-- <*subject*>-<*surface name*>-<*ntri*> ``.pnt`` files
-  are created for each of the surfaces present in the BEM model. These
-  can be loaded into MRIlab to check the location of the surfaces.
-
-- <*subject*>-<*surface name*>-<*ntri*> ``.surf`` files
-  are created for each of the surfaces present in the BEM model. These
-  can be loaded into tkmedit to check
-  the location of the surfaces.
-
-- The BEM 'solution' file containing the geometry
-  dependent solution data will be produced with the same name as the
-  BEM geometry specifications with the ending ``-bem-sol.fif`` .
-  These files also contain all the information in the ``-bem.fif`` files.
-
-After the BEM is set up it is advisable to check that the
-BEM model meshes are correctly positioned. This can easily be done
-by loading the COR.fif file
-from mri/T1-neuromag/sets into
-MRIlab and by subsequently overlaying the corresponding pnt files
-using Import/Strings from the File menu.
-
-.. note:: The FreeSurfer format BEM surfaces can also be viewed with the tkmedit program which is part of the FreeSurfer distribution.
-
-.. note:: If the SUBJECT environment variable is set, it is usually sufficient to run ``mne_setup_forward_model`` without any options for the three-layer model and with the ``--homog`` option for the single-layer model. If the input files are FreeSurfer surfaces, ``--surf`` and ``--ico 4`` are required as well.
-
-.. note:: With the help of the ``--nosol`` option it is possible to create candidate BEM geometry data files quickly and do the checking with respect to the anatomical MRI data. When the result is satisfactory, mne_setup_forward_model can be run without ``--nosol`` to invoke the time-consuming calculation of the solution file as well.
-
-.. note:: The triangle meshes created by the seglab program have counterclockwise vertex ordering and thus require the ``--noswap`` option.
-
-.. note:: Up to this point all processing stages depend on the anatomical (geometrical) information only and thus remain identical across different MEG studies.
-
-Setting up the MEG/EEG analysis directory
-#########################################
-
-The remaining steps require that the actual MEG/EEG data
-are available. It is recommended that a new directory is created
-for the MEG/EEG data processing. The raw data files collected should not be
-copied there but rather referred to with symbolic links created
-with the ``ln -s`` command. Averages calculated
-on-line can be either copied or referred to with links.
-
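-For example, an analysis directory could be populated along the
-following lines (the directory and file names are only illustrative):
-
-``mkdir $HOME/meg/duck_donald``
-
-``cd $HOME/meg/duck_donald``
-
-``ln -s /megdata/duck_donald/run01_raw.fif .``
-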
-.. note:: If you don't know how to create a directory, how to make symbolic links, or how to copy files from the shell command line, this is a perfect time to learn these basic skills from other users or from a suitable elementary book before proceeding.
-
-Preprocessing the raw data
-##########################
-
-The following MEG and EEG data preprocessing steps are recommended:
-
-- The coding problems on the trigger channel
-  STI 014 may have to be fixed, see :ref:`BABCDBDI`.
-
-- EEG electrode location information and MEG coil types may
-  need to be fixed, see :ref:`BABCDFJH`.
-
-- The data may be optionally downsampled to facilitate subsequent
-  processing, see :ref:`BABDGFFG`.
-
-- Bad channels in the MEG and EEG data must be identified, see :ref:`BABBHCFG`.
-
-- The data have to be filtered to the desired passband. If mne_browse_raw or mne_process_raw is
-  employed to calculate the offline averages and covariance matrices,
-  this step is unnecessary since the data are filtered on the fly.
-  For information on these programs, please consult :ref:`ch_browse`.
-
-- For evoked-response analysis, the data have to be re-averaged
-  off-line, see :ref:`BABEAEDF`.
-
-.. _BABCDBDI:
-
-Cleaning the digital trigger channel
-====================================
-
-The calibration factor of the digital trigger channel used
-to be set to a value much smaller than one by the Neuromag data
-acquisition software. Especially to facilitate viewing of raw data
-in graph it is advisable to change the calibration factor to one.
-Furthermore, the eighth bit of the trigger word is coded incorrectly
-in the original raw files. Both problems can be corrected by saying:
-
-``mne_fix_stim14`` <*raw file*>
-
-More information about mne_fix_stim14 is
-available in :ref:`CHDBFDIC`. It is recommended that this
-fix is included as the first raw data processing step. Note, however,
-that mne_browse_raw and mne_process_raw always set
-the calibration factor to one internally.
-
-.. note:: If your data file was acquired on or after November 10, 2005 on the Martinos Center Vectorview system, it is not necessary to use mne_fix_stim14.
-
-.. _BABCDFJH:
-
-Fixing channel information
-==========================
-
-There are two potential discrepancies in the channel information
-which need to be fixed before proceeding:
-
-- EEG electrode locations may be incorrect
-  if more than 60 EEG channels are acquired.
-
-- The magnetometer coil identifiers are not always correct.
-
-These potential problems can be fixed with the utilities mne_check_eeg_locations and mne_fix_mag_coil_types,
-see :ref:`CHDJGGGC` and :ref:`CHDGAAJC`.
-
-.. _BABBHCFG:
-
-Designating bad channels
-========================
-
-Sometimes some MEG or EEG channels are not functioning properly
-for various reasons. These channels should be excluded from the
-analysis by marking them bad using the mne_mark_bad_channels utility,
-see :ref:`CHDDHBEE`. Especially if a channel does not show
-a signal at all (flat) it is most important to exclude it from the
-analysis, since its noise estimate will be unrealistically low and
-thus the current estimate calculations will give a strong weight
-to the zero signal on the flat channels, and the estimates will essentially vanish.
-It is also important to exclude noisy channels because they can
-possibly affect others when signal-space projections or EEG average electrode
-reference is employed. Noisy bad channels can also adversely affect
-off-line averaging and noise-covariance matrix estimation by causing
-unnecessary rejections of epochs.
-
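-For example, assuming that the names of the malfunctioning channels
-have been collected, one channel name per line, into a text file called
-``bads.txt`` (an illustrative name), the channels could be marked bad
-with
-
-``mne_mark_bad_channels --bad bads.txt`` <*raw file*>
-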
-Recommended ways to identify bad channels are:
-
-- Observe the quality of data during data
-  acquisition and make notes of observed malfunctioning channels to
-  your measurement protocol sheet.
-
-- View the on-line averages and check the condition of the channels.
-
-- Compute preliminary off-line averages with artefact rejection,
-  signal-space projection, and EEG average electrode reference computation
-  off and check the condition of the channels.
-
-- View raw data in mne_process_raw or
-  the Neuromag signal processor graph without
-  signal-space projection or EEG average electrode reference computation
-  and identify bad channels.
-
-.. note:: It is strongly recommended that bad channels are identified and marked in the original raw data files. If present in the raw data files, the bad channel selections will be automatically transferred to averaged files, noise-covariance matrices, forward solution files, and inverse operator decompositions.
-
-.. _BABDGFFG:
-
-Downsampling the MEG/EEG data
-=============================
-
-The minimum practical sampling frequency of the Vectorview
-system is 600 Hz. Lower sampling frequencies are allowed but result
-in elevated noise level in the data. It is advisable to lowpass
-filter and downsample the large raw data files often emerging in
-cognitive and patient studies to speed up subsequent processing.
-This can be accomplished with the mne_process_raw and mne_browse_raw software
-modules. For details, see :ref:`CACFAAAJ` and :ref:`CACBDDIE`.
-
-.. note:: It is recommended that the original raw file is called <*name*>_raw.fif and the downsampled version <*name*>_ds_raw.fif.
-
-.. _BABEAEDF:
-
-Off-line averaging
-==================
-
-The recommended tools for off-line averaging are mne_browse_raw and mne_process_raw. mne_browse_raw is
-an interactive program for averaging and noise-covariance matrix
-computation. It also includes routines for filtering so that the
-downsampling and filtering steps can be skipped. Therefore, with mne_browse_raw you
-can produce the off-line average and noise-covariance matrix estimates
-directly. The batch-mode version of mne_browse_raw is
-called mne_process_raw. Detailed
-information on mne_browse_raw and mne_process_raw can
-be found in :ref:`ch_browse`.
-
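-For example, an off-line average could be computed in batch mode along
-the following lines, where the raw file name is illustrative and
-``run01.ave`` stands for an averaging description file whose format is
-explained in :ref:`ch_browse`:
-
-``mne_process_raw --raw run01_raw.fif --ave run01.ave``
-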
-.. _CHDBEHDC:
-
-Aligning the coordinate frames
-##############################
-
-The calculation of the forward solution requires knowledge
-of the relative location and orientation of the MEG/EEG and MRI
-coordinate systems. The MEG/EEG head coordinate system is defined
-in :ref:`BJEBIBAI`. The conversion tools included in the MNE
-software take care of the idiosyncrasies of the coordinate frame
-definitions in different MEG and EEG systems so that the fif files
-always employ the same definition of the head coordinate system.
-
-Ideally, the head coordinate frame has a fixed orientation
-and origin with respect to the head anatomy. Therefore, a single
-MRI-head coordinate transformation for each subject should be sufficient.
-However, as explained in :ref:`BJEBIBAI`, the head coordinate
-frame is defined by identifying the fiducial landmark locations,
-making the origin and orientation of the head coordinate system
-slightly user dependent. As a result, the most conservative choice
-for the definition of the coordinate transformation computation
-is to re-establish it for each experimental session, *i.e.*,
-each time when new head digitization data are employed.
-
-The interactive source analysis software mne_analyze provides
-tools for coordinate frame alignment, see :ref:`ch_interactive_analysis`. :ref:`CHDIJBIG` also
-contains tips for using mne_analyze for
-this purpose.
-
-Another useful tool for the coordinate system alignment is MRIlab,
-the Neuromag MEG-MRI integration tool. Section 3.3.1 of the MRIlab User's
-Guide, Neuromag P/N NM20419A-A contains a detailed description of
-this task. Employ the images in the set ``mri/T1-neuromag/sets/COR.fif`` for
-the alignment. Check the alignment carefully using the digitization
-data included in the measurement file as described in Section 5.3.1
-of the above manual. Save the aligned description file in the same
-directory as the original description file without the alignment
-information but under a different name.
-
-.. warning:: This step is extremely important. If the alignment of the coordinate frames is inaccurate all subsequent processing steps suffer from the error. Therefore, this step should be performed by the person in charge of the study or by a trained technician. Written or photographic documentation of the alignment points employed during the MEG/EEG acquisition can also be helpful.
-
-.. _BABCHEJD:
-
-Computing the forward solution
-##############################
-
-After the MRI-MEG/EEG alignment has been set, the forward
-solution, *i.e.*, the magnetic fields and electric
-potentials at the measurement sensors and electrodes due to dipole
-sources located on the cortex, can be calculated with the help of the
-convenience script mne_do_forward_solution.
-This utility accepts the following options:
-
-**\---subject <*subject*>**
-
-    Defines the name of the subject. This can also be accomplished
-    by setting the SUBJECT environment variable.
-
-**\---src <*name*>**
-
-    Source space name to use. This option overrides the ``--spacing`` option. The
-    source space file is searched for first in the current working directory
-    and then in ``$SUBJECTS_DIR/`` <*subject*> /bem.
-    The source space file must be specified exactly, including the ``fif`` extension.
-
-**\---spacing <*spacing/mm*> or ``ico-`` <*number*> or ``oct-`` <*number*>**
-
-    This is an alternate way to specify the name of the source space
-    file. For example, if ``--spacing 6`` is given on the command
-    line, the source space files searched for are ./<*subject*> -6-src.fif
-    and ``$SUBJECTS_DIR/$SUBJECT/`` bem/<*subject*> -6-src.fif.
-    The first file found is used. Spacing defaults to 7 mm.
-
-**\---bem <*name*>**
-
-    Specifies the BEM to be used. The name of the file can be any of <*name*>, <*name*> -bem.fif, or <*name*> -bem-sol.fif.
-    The file is searched for first in the current working directory and
-    then in ``bem`` . If this option is omitted, the most recent
-    BEM file in the ``bem`` directory is used.
-
-**\---mri <*name*>**
-
-    The name of the MRI description file containing the MEG/MRI coordinate
-    transformation. This file was saved as part of the alignment procedure
-    outlined in :ref:`CHDBEHDC`. The file is searched for first in
-    the current working directory and then in ``mri/T1-neuromag/sets`` .
-    The search order for MEG/MRI coordinate transformations is discussed
-    below.
-
-**\---trans <*name*>**
-
-    The name of a text file containing the 4 x 4 matrix for the coordinate transformation
-    from head to MRI coordinates, see below. If the option ``--trans`` is
-    present, the ``--mri`` option is not required. The search
-    order for MEG/MRI coordinate transformations is discussed below.
-
-**\---meas <*name*>**
-
-    This file is the measurement fif file or an off-line average file
-    produced from it. It is recommended that the average file is employed for
-    evoked-response data and the original raw data file otherwise. This
-    file provides the MEG sensor locations and orientations, the
-    EEG electrode locations, and the coordinate transformation between
-    the MEG device coordinates and MEG head-based coordinates.
-
-**\---fwd <*name*>**
-
-    This file will contain the forward solution as well as the coordinate transformations,
-    sensor and electrode location information, and the source space
-    data. A name of the form <*name*> ``-fwd.fif`` is
-    recommended. If this option is omitted the forward solution file
-    name is automatically created from the measurement file name and
-    the source space name.
-
-**\---destdir <*directory*>**
-
-    Optionally specifies a directory where the forward solution will
-    be stored.
-
-**\---mindist <*dist/mm*>**
-
-    Omit source space points closer than this value to the inner skull surface.
-    Any source space points outside the inner skull surface are automatically
-    omitted. The use of this option ensures that numerical inaccuracies
-    for very superficial sources do not cause unexpected effects in
-    the final current estimates. A suitable value for this parameter is
-    of the order of the size of the triangles on the inner skull surface.
-    If you employ the seglab software
-    to create the triangulations, this value should be about equal to
-    the desired side length of the triangles.
-
-**\---megonly**
-
-    Omit EEG forward calculations.
-
-**\---eegonly**
-
-    Omit MEG forward calculations.
-
-**\---all**
-
-    Compute the forward solution for all vertices on the source space.
-
-**\---overwrite**
-
-    Overwrite the possibly existing forward model file.
-
-**\---help**
-
-    Show usage information for the script.
-
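-For example, assuming an off-line average computed as described above
-(the file name is illustrative), an MEG-only forward solution employing
-a 5-mm source space could be computed with
-
-``mne_do_forward_solution --subject duck_donald --meas run01-ave.fif --spacing 5 --megonly``
-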
-The MEG/MRI transformation is determined by the following
-search sequence:
-
-- If the ``--mri`` option was
-  present, the file is looked for literally as specified, in the directory
-  of the measurement file specified with the ``--meas`` option,
-  and in the directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets.
-  If the file is not found, the script exits with an error message.
-
-- If the ``--trans`` option was present, the file is
-  looked up literally as specified. If the file is not found, the
-  script exits with an error message.
-
-- If neither the ``--mri`` nor the ``--trans`` option
-  was present, the following default search sequence is engaged:
-
-  - The ``.fif`` ending in the
-    measurement file name is replaced by ``-trans.fif`` . If
-    this file is present, it will be used.
-
-  - The newest file whose name ends with ``-trans.fif`` in
-    the directory of the measurement file is looked up. If such a file
-    is present, it will be used.
-
-  - The newest file whose name starts with ``COR-`` in
-    directory $SUBJECTS_DIR/$SUBJECT/mri/T1-neuromag/sets is looked
-    up. If such a file is present, it will be used.
-
-  - If all the above searches fail, the script exits with an error
-    message.
-
-This search sequence is designed to work well with the MEG/MRI
-transformation files output by mne_analyze ,
-see :ref:`CACEHGCD`. It is recommended that the -trans.fif files
-saved with the Save default and Save... options in
-the mne_analyze alignment dialog
-are used because then the $SUBJECTS_DIR/$SUBJECT directory will
-be composed of files which are dependent on the subject's
-anatomy only, not on the MEG/EEG data to be analyzed.
-
-.. note:: If the standard MRI description file and BEM file selections are appropriate and the 7-mm source space grid spacing is appropriate, only the ``--meas`` option is necessary. If EEG data is not used, the ``--megonly`` option should be included.
-
-.. note:: If it is conceivable that the current-density transformation will be incorporated into the inverse operator, specify a source space with patch information for the forward computation. This is not mandatory but saves a lot of time when the inverse operator is created, since the patch information does not need to be created at that stage.
-
-.. note:: The MEG head to MRI transformation matrix specified with the ``--trans`` option should be a text file containing a 4-by-4 matrix:
-
-.. math::    T = \begin{bmatrix}
-		R_{11} & R_{12} & R_{13} & x_0 \\
-		R_{21} & R_{22} & R_{23} & y_0 \\
-		R_{31} & R_{32} & R_{33} & z_0 \\
-		0 & 0 & 0 & 1
-		\end{bmatrix}
-
-defined so that if the augmented location vectors in MEG
-head and MRI coordinate systems are denoted by :math:`r_{head} = [x_{head}\ y_{head}\ z_{head}\ 1]^T` and :math:`r_{MRI} = [x_{MRI}\ y_{MRI}\ z_{MRI}\ 1]^T`,
-respectively,
-
-.. math::    r_{MRI} = T r_{head}
-
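-For example, a ``--trans`` file containing the identity transformation,
-*i.e.*, stating that the head and MRI coordinate frames coincide, would
-read::
-
-    1 0 0 0
-    0 1 0 0
-    0 0 1 0
-    0 0 0 1
-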
-.. note:: It is not possible to calculate an EEG forward solution with a single-layer BEM.
-
-.. _BABDEEEB:
-
-Setting up the noise-covariance matrix
-######################################
-
-The MNE software employs an estimate of the noise-covariance
-matrix to weight the channels correctly in the calculations. The
-noise-covariance matrix provides information about field and potential
-patterns representing uninteresting noise sources of either human
-or environmental origin.
-
-The noise covariance matrix can be calculated in several
-ways:
-
-- Employ the individual epochs during
-  off-line averaging to calculate the full noise covariance matrix.
-  This is the recommended approach for evoked responses.
-
-- Employ empty room data (collected without the subject) to
-  calculate the full noise covariance matrix. This is recommended
-  for analyzing ongoing spontaneous activity.
-
-- Employ a section of continuous raw data collected in the presence
-  of the subject to calculate the full noise covariance matrix. This
-  is the recommended approach for analyzing epileptic activity. The
-  data used for this purpose should be free of technical artifacts
-  and epileptic activity of interest. The length of the data segment
-  employed should be at least 20 seconds. One can also use a long
-  (> 200 s) segment of data with epileptic spikes present provided
-  that the spikes occur infrequently and that the segment is apparently
-  stationary with respect to background brain activity.
-
-The new raw data processing tools, mne_browse_raw and mne_process_raw, include
-computation of noise-covariance matrices both from raw data and
-from individual epochs. For details, see :ref:`ch_browse`.
-
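-For example, a noise-covariance matrix could be estimated from an
-empty-room recording in batch mode along the following lines, where the
-raw file name is illustrative and ``empty_room.cov`` stands for a
-covariance description file whose format is explained in :ref:`ch_browse`:
-
-``mne_process_raw --raw empty_room_raw.fif --cov empty_room.cov``
-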
-.. _CIHCFJEI:
-
-Calculating the inverse operator decomposition
-##############################################
-
-The MNE software doesn't calculate the inverse operator
-explicitly but rather computes an SVD of a matrix composed of the
-noise-covariance matrix, the result of the forward calculation,
-and the source covariance matrix. This approach has the benefit
-that the regularization parameter ('SNR') can
-be adjusted easily when the final source estimates or dSPMs are
-computed. For mathematical details of this approach, please consult :ref:`CBBDJFBJ`.
-
-This computation stage is facilitated by the convenience
-script mne_do_inverse_operator. It
-invokes the program mne_inverse_operator with
-appropriate options, derived from the command line of mne_do_inverse_operator.
-
-mne_do_inverse_operator accepts
-the following options:
-
-**\---fwd <*name of the forward solution file*>**
-
-    This is the forward solution file produced in the computations step described
-    in :ref:`BABCHEJD`.
-
-**\---meg**
-
-    Employ MEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
-    set only MEG channels are included.
-
-**\---eeg**
-
-    Employ EEG data in the inverse calculation. If neither ``--meg`` nor ``--eeg`` is
-    set only MEG channels are included.
-
-**\---fixed**
-
-    Use fixed source orientations normal to the cortical mantle. By default,
-    the source orientations are not constrained. If ``--fixed`` is specified,
-    the ``--loose`` flag is ignored.
-
-**\---loose <*amount*>**
-
-    Use a 'loose' orientation constraint. This means
-    that the source covariance matrix entries corresponding to the current
-    component normal to the cortex are set equal to one and the transverse
-    components are set to <*amount*> .
-    The recommended value for <*amount*> is 0.1...0.6.
-
-**\---depth**
-
-    Employ depth weighting with the standard settings. For details,
-    see :ref:`CBBDFJIE` and :ref:`CBBDDBGF`.
-
-**\---bad <*name*>**
-
-    Specifies a text file designating bad channels, with one channel name
-    (like MEG 1933) on each line of the file. Be sure to include both
-    noisy and flat (non-functioning) channels in the list. If bad channels
-    were designated using mne_mark_bad_channels in
-    the measurement file which was specified with the ``--meas`` option when
-    the forward solution was computed, the bad channel information will
-    be automatically included. Also, any bad channel information in
-    the noise-covariance matrix file will be included.
-
-**\---noisecov <*name*>**
-
-    Name of the noise-covariance matrix file computed with one of the methods
-    described in :ref:`BABDEEEB`. By default, the script looks
-    for a file whose name is derived from the forward solution file
-    by replacing its ending ``-`` <*anything*> ``-fwd.fif`` by ``-cov.fif`` .
-    If this file contains a projection operator, which will be automatically
-    attached to the noise-covariance matrix by mne_browse_raw and mne_process_raw,
-    no ``--proj`` option is necessary because mne_inverse_operator will
-    automatically include the projectors from the noise-covariance matrix
-    file. For backward compatibility, ``--senscov`` can be used as a synonym
-    for ``--noisecov``.
-
-**\---noiserank <*value*>**
-
-    Specifies the rank of the noise covariance matrix explicitly rather than
-    trying to reduce it automatically. This option is seldom needed.
-
-**\---megreg <*value*>**
-
-    Regularize the MEG part of the noise-covariance matrix by this amount.
-    Suitable values are in the range 0.05...0.2. For details, see :ref:`CBBHEGAB`.
-
-**\---eegreg <*value*>**
-
-    Like ``--megreg`` but applies to the EEG channels.
-
-**\---diagnoise**
-
-    Omit the off-diagonal terms of the noise covariance matrix. This option
-    is irrelevant to most users.
-
-**\---fmri <*name*>**
-
-    With the help of this w file, an *a priori* weighting
-    can be applied to the source covariance matrix. The source of the weighting
-    is usually fMRI but may be also some other data, provided that the weighting can
-    be expressed as a scalar value on the cortical surface, stored in
-    a w file. It is recommended that this w file is appropriately smoothed (see :ref:`CHDEBAHH`)
-    in mne_analyze , tksurfer or
-    with mne_smooth_w to contain
-    nonzero values at all vertices of the triangular tessellation of
-    the cortical surface. The name of the file given is used as a stem of
-    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
-    the left and right hemisphere weight files, respectively. The application
-    of the weighting is discussed in :ref:`CBBDIJHI`.
-
-**\---fmrithresh <*value*>**
-
-    This option is mandatory if a weighting function
-    has been specified with the ``--fmri`` option and has no effect otherwise. If the value
-    in the *a priori* files falls below this value
-    at a particular source space point, the source covariance matrix
-    values are multiplied by the value specified with the ``--fmrioff`` option
-    (default 0.1). Otherwise it is left unchanged.
-
-**\---fmrioff <*value*>**
-
-    The value by which the source covariance elements are multiplied
-    if the *a priori* weight falls below the threshold
-    set with ``--fmrithresh`` , see above.
-
-**\---srccov <*name*>**
-
-    Use this diagonal source covariance matrix. By default the source covariance
-    matrix is a multiple of the identity matrix. This option is irrelevant
-    to most users.
-
-**\---proj <*name*>**
-
-    Include signal-space projection information from this file.
-
-**\---inv <*name*>**
-
-    Save the inverse operator decomposition here. By default, the script looks
-    for a file whose name is derived from the forward solution file by
-    replacing its ending ``-fwd.fif`` by <*options*> ``-inv.fif`` , where
-    <*options*> includes options ``--meg``, ``--eeg``, and ``--fixed`` with the double
-    dashes replaced by single ones.
-
-**\---destdir <*directory*>**
-
-    Optionally specifies a directory where the inverse operator will
-    be stored.
-
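-For example, assuming the forward solution computed in :ref:`BABCHEJD`
-is available (the file name is illustrative), a depth-weighted MEG
-inverse operator with a loose orientation constraint could be set up
-with
-
-``mne_do_inverse_operator --fwd run01-5-fwd.fif --meg --loose 0.2 --depth``
-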
-.. note:: If bad channels are included in the calculation, strange results may ensue. Therefore, it is recommended that the data to be analyzed are carefully inspected to assign the bad channels correctly.
-
-.. note:: For convenience, the MNE software includes bad-channel designation files which can be used to ignore all magnetometer or all gradiometer channels in Vectorview measurements. These files are called ``vv_grad_only.bad`` and ``vv_mag_only.bad``, respectively. Both files are located in ``$MNE_ROOT/share/mne/templates``.
-
-Analyzing the data
-##################
-
-Once all the preprocessing steps described above have been
-completed, the inverse operator computed can be applied to the MEG
-and EEG data and the results can be viewed and stored in several
-ways:
-
-- The interactive analysis tool mne_analyze can
-  be used to explore the data and to produce quantitative analysis
-  results, screen snapshots, and QuickTime (TM) movie files.
-  For comprehensive information on mne_analyze ,
-  please consult :ref:`ch_interactive_analysis`.
-
-- The command-line tool mne_make_movie can
-  be invoked to produce QuickTime movies and snapshots. mne_make_movie can
-  also output the data in the stc (movies) and w (snapshots) formats
-  for subsequent processing. Furthermore, subject-to-subject morphing
-  is included in mne_make_movie to
-  facilitate cross-subject averaging and comparison of data among
-  subjects. mne_make_movie is described
-  in :ref:`CBBECEDE`.
-
-- The command-line tool mne_make_movie can
-  be employed to interrogate the source estimate waveforms from labels
-  (ROIs).
-
-- The mne_make_movie tool
-  can also be used to create movies from stc files and to resample
-  stc files in time.
-
-- The mne_compute_raw_inverse tool
-  can be used to produce fif files containing source estimates at
-  selected ROIs. The input data file can be either a raw data or evoked
-  response MEG/EEG file, see :ref:`CBBCGHAH`.
-
-- Using the MNE Matlab toolbox, it is possible to perform many
-  of the above operations in Matlab using your own Matlab code based
-  on the MNE Matlab toolbox. For more information on the MNE Matlab
-  toolbox, see :ref:`ch_matlab`.
-
-- It is also possible to average the source estimates across
-  subjects as described in :ref:`ch_morph`.
diff --git a/doc/source/manual/intro.rst b/doc/source/manual/intro.rst
deleted file mode 100644
index e0ea63d..0000000
--- a/doc/source/manual/intro.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-
-
-.. _CHDDEFAB:
-
-============
-Introduction
-============
-
-This document describes a set of programs for preprocessing
-and averaging of MEG and EEG data and for constructing cortically-constrained minimum-norm
-estimates. This software package will in the sequel be referred to
-as *MNE software*. The software is based on anatomical
-MRI processing, forward modeling, and source estimation methods published in
-Dale, Fischl, Hämäläinen, and others.
-The software depends on anatomical MRI processing tools provided
-by the FreeSurfer software.
-
-:ref:`CHDBAFGJ` gives an overview of the software
-modules included with MNE software. :ref:`ch_cookbook` is a concise cookbook
-describing a typical workflow for a novice user employing the convenience
-scripts as far as possible. :ref:`ch_browse` to :ref:`ch_misc` give more detailed
-information about the software modules. :ref:`ch_sample_data` discusses
-processing of the sample data set included with the MNE software. :ref:`ch_reading` lists
-some useful background material for the methods employed in the
-MNE software.
-
-:ref:`create_bem_model` is an overview of the BEM model mesh
-generation methods, :ref:`setup_martinos` contains information specific
-to the setup at Martinos Center of Biomedical Imaging, :ref:`install_config` is
-a software installation and configuration guide, :ref:`release_notes` summarizes
-the software history, and :ref:`licence` contains the End-User
-License Agreement.
-
-.. note:: The most recent version of this manual is available at ``$MNE_ROOT/share/doc/MNE-manual-`` <*version*> ``.pdf``. For the present manual, <*version*> = ``2.7``. For definition of the ``MNE_ROOT`` environment variable, see :ref:`user_environment`.
-
-We want to thank all MNE Software users at the Martinos Center and
-in other institutions for their collaboration during the creation
-of this software as well as for useful comments on the software
-and its documentation.
-
-The development of this software has been supported by the
-NCRR *Center for Functional Neuroimaging Technologies* P41RR14075-06, the
-NIH grants 1R01EB009048-01, R01 EB006385-A101, 1R01 HD40712-A1, 1R01
-NS44319-01, and 2R01 NS37462-05, as well as by the Department of Energy
-under Award Number DE-FG02-99ER62764 to The MIND Institute.
diff --git a/doc/source/manual/list.rst b/doc/source/manual/list.rst
deleted file mode 100644
index ee5f9aa..0000000
--- a/doc/source/manual/list.rst
+++ /dev/null
@@ -1,439 +0,0 @@
-
-
-.. _CHDBAFGJ:
-
-========
-Overview
-========
-
-List of components
-##################
-
-The principal components of the MNE Software and their functions
-are listed in :ref:`CHDDJIDB`. Documented software is listed
-in italics. :ref:`BABDJHGH` lists various supplementary utilities.
-
-.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
-.. _CHDDJIDB:
-.. table:: The software components.
-
-    +----------------------------+--------------------------------------------+
-    | Name                       |   Purpose                                  |
-    +============================+============================================+
-    | *mne_analyze*              | An interactive analysis tool for computing |
-    |                            | source estimates, see                      |
-    |                            | :ref:`ch_interactive_analysis`.            |
-    +----------------------------+--------------------------------------------+
-    | *mne_average_estimates*    | Average data across subjects,              |
-    |                            | see :ref:`CHDEHFGD`.                       |
-    +----------------------------+--------------------------------------------+
-    | *mne_browse_raw*           | Interactive raw data browser. Includes     |
-    |                            | filtering, offline averaging, and          |
-    |                            | computation of covariance matrices,        |
-    |                            | see :ref:`ch_browse`.                      |
-    +----------------------------+--------------------------------------------+
-    | *mne_compute_mne*          | Computes the minimum-norm estimates,       |
-    |                            | see :ref:`BABDABHI`. Most of the           |
-    |                            | functionality of mne_compute_mne is        |
-    |                            | included in mne_make_movie.                |
-    +----------------------------+--------------------------------------------+
-    | *mne_compute_raw_inverse*  | Compute the inverse solution from raw data |
-    |                            | see :ref:`CBBCGHAH`.                       |
-    +----------------------------+--------------------------------------------+
-    | *mne_convert_mne_data*     | Convert MNE data files to other file       |
-    |                            | formats, see :ref:`BEHCCEBJ`.              |
-    +----------------------------+--------------------------------------------+
-    | *mne_do_forward_solution*  | Convenience script to calculate the forward|
-    |                            | solution matrix, see :ref:`BABCHEJD`.      |
-    +----------------------------+--------------------------------------------+
-    | *mne_do_inverse_operator*  | Convenience script for inverse operator    |
-    |                            | decomposition, see :ref:`CIHCFJEI`.        |
-    +----------------------------+--------------------------------------------+
-    | *mne_forward_solution*     | Calculate the forward solution matrix, see |
-    |                            | :ref:`CHDDIBAH`.                           |
-    +----------------------------+--------------------------------------------+
-    | mne_inverse_operator       | Compute the inverse operator decomposition |
-    |                            | see :ref:`CBBDDBGF`.                       |
-    +----------------------------+--------------------------------------------+
-    | *mne_make_movie*           | Make movies in batch mode, see             |
-    |                            | :ref:`CBBECEDE`.                           |
-    +----------------------------+--------------------------------------------+
-    | *mne_make_source_space*    | Create a *fif* source space description    |
-    |                            | file, see :ref:`BEHCGJDD`.                 |
-    +----------------------------+--------------------------------------------+
-    | *mne_process_raw*          | A batch-mode version of mne_browse_raw,    |
-    |                            | see :ref:`ch_browse`.                      |
-    +----------------------------+--------------------------------------------+
-    | mne_redo_file              | Many intermediate result files contain a   |
-    |                            | description of their                       |
-    |                            | 'production environment'. Such files can   |
-    |                            | be recreated easily with this utility.     |
-    |                            | This is convenient if, for example,        |
-    |                            | the selection of bad channels is changed   |
-    |                            | and the inverse operator decomposition has |
-    |                            | to be recalculated.                        |
-    +----------------------------+--------------------------------------------+
-    | mne_redo_file_nocwd        | Works like mne_redo_file but does not try  |
-    |                            | to change into the working directory       |
-    |                            | specified in the 'production environment'. |
-    +----------------------------+--------------------------------------------+
-    | *mne_setup_forward_model*  | Set up the BEM-related fif files,          |
-    |                            | see :ref:`CIHDBFEG`.                       |
-    +----------------------------+--------------------------------------------+
-    | *mne_setup_mri*            | A convenience script to create the fif     |
-    |                            | files describing the anatomical MRI data,  |
-    |                            | see :ref:`BABCCEHF`.                       |
-    +----------------------------+--------------------------------------------+
-    | *mne_setup_source_space*   | A convenience script to create source space|
-    |                            | description file, see :ref:`CIHCHDAE`.     |
-    +----------------------------+--------------------------------------------+
-    | mne_show_environment       | Show information about the production      |
-    |                            | environment of a file.                     |
-    +----------------------------+--------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.3\linewidth}|p{0.65\linewidth}|
-.. _BABDJHGH:
-.. table:: Utility programs.
-
-    +---------------------------------+--------------------------------------------+
-    | Name                            |   Purpose                                  |
-    +=================================+============================================+
-    | *mne_add_patch_info*            | Add neighborhood information to a source   |
-    |                                 | space file, see :ref:`BEHCBCGG`.           |
-    +---------------------------------+--------------------------------------------+
-    | *mne_add_to_meas_info*          | Utility to add new information to the      |
-    |                                 | measurement info block of a fif file. The  |
-    |                                 | source of information is another fif file. |
-    +---------------------------------+--------------------------------------------+
-    | *mne_add_triggers*              | Modify the trigger channel STI 014 in a raw|
-    |                                 | data file, see :ref:`CHDBDDDF`. The same   |
-    |                                 | effect can be reached by using an event    |
-    |                                 | file for averaging in mne_process_raw and  |
-    |                                 | mne_browse_raw.                            |
-    +---------------------------------+--------------------------------------------+
-    | *mne_annot2labels*              | Convert parcellation data into label files,|
-    |                                 | see :ref:`CHDEDHCG`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_anonymize*                 | Remove subject-specific information from a |
-    |                                 | fif data file, see :ref:`CHDIJHIC`.        |
-    +---------------------------------+--------------------------------------------+
-    | *mne_average_forward_solutions* | Calculate an average of forward solutions, |
-    |                                 | see :ref:`CHDBBFCA`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_brain_vision2fiff*         | Convert EEG data from BrainVision format   |
-    |                                 | to fif format, see :ref:`BEHCCCDC`.        |
-    +---------------------------------+--------------------------------------------+
-    | *mne_change_baselines*          | Change the dc offsets according to         |
-    |                                 | specifications given in a text file,       |
-    |                                 | see :ref:`CHDDIDCC`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_change_nave*               | Change the number of averages in an        |
-    |                                 | evoked-response data file. This is often   |
-    |                                 | necessary if the file was derived from     |
-    |                                 | several files.                             |
-    +---------------------------------+--------------------------------------------+
-    | *mne_check_eeg_locations*       | Checks that the EEG electrode locations    |
-    |                                 | have been correctly transferred from the   |
-    |                                 | Polhemus data block to the channel         |
-    |                                 | information tags, see :ref:`CHDJGGGC`.     |
-    +---------------------------------+--------------------------------------------+
-    | *mne_check_surface*             | Check the validity of a FreeSurfer surface |
-    |                                 | file or one of the surfaces within a BEM   |
-    |                                 | file. This program simply checks for       |
-    |                                 | topological errors in surface files.       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_collect_transforms*        | Collect coordinate transformations from    |
-    |                                 | several sources into a single fif file,    |
-    |                                 | see :ref:`BABBIFIJ`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_compensate_data*           | Change the applied software gradient       |
-    |                                 | compensation in an evoked-response data    |
-    |                                 | file, see :ref:`BEHDDFBI`.                 |
-    +---------------------------------+--------------------------------------------+
-    | *mne_convert_lspcov*            | Convert the LISP format noise covariance   |
-    |                                 | matrix output by graph into fif,           |
-    |                                 | see :ref:`BEHCDBHG`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_convert_ncov*              | Convert the ncov format noise covariance   |
-    |                                 | file to fif, see :ref:`BEHCHGHD`.          |
-    +---------------------------------+--------------------------------------------+
-    | *mne_convert_surface*           | Convert FreeSurfer and text format surface |
-    |                                 | files into Matlab mat files,               |
-    |                                 | see :ref:`BEHDIAJG`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_cov2proj*                  | Pick eigenvectors from a covariance matrix |
-    |                                 | and create a signal-space projection (SSP) |
-    |                                 | file out of them, see :ref:`CHDECHBF`.     |
-    +---------------------------------+--------------------------------------------+
-    | *mne_create_comp_data*          | Create a fif file containing software      |
-    |                                 | gradient compensation information from a   |
-    |                                 | text file, see :ref:`BEHBIIFF`.            |
-    +---------------------------------+--------------------------------------------+
-    | *mne_ctf2fiff*                  | Convert a CTF ds folder into a fif file,   |
-    |                                 | see :ref:`BEHDEBCH`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_ctf_dig2fiff*              | Convert text format digitization data to   |
-    |                                 | fif format, see :ref:`BEHBABFA`.           |
-    +---------------------------------+--------------------------------------------+
-    | *mne_dicom_essentials*          | List essential information from a          |
-    |                                 | DICOM file.                                |
-    |                                 | This utility is used by the script         |
-    |                                 | mne_organize_dicom, see :ref:`BABEBJHI`.   |
-    +---------------------------------+--------------------------------------------+
-    | *mne_edf2fiff*                  | Convert EEG data from the EDF/EDF+/BDF     |
-    |                                 | formats to the fif format,                 |
-    |                                 | see :ref:`BEHIAADG`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_epochs2mat*                | Apply bandpass filter to raw data and      |
-    |                                 | extract epochs for subsequent processing   |
-    |                                 | in Matlab, see :ref:`BEHFIDCB`.            |
-    +---------------------------------+--------------------------------------------+
-    | *mne_evoked_data_summary*       | List summary of averaged data from a fif   |
-    |                                 | file to the standard output.               |
-    +---------------------------------+--------------------------------------------+
-    | *mne_eximia2fiff*               | Convert EEG data from the Nexstim eXimia   |
-    |                                 | system to fif format, see :ref:`BEHGCEHH`. |
-    +---------------------------------+--------------------------------------------+
-    | *mne_fit_sphere_to_surf*        | Fit a sphere to a surface given in fif     |
-    |                                 | or FreeSurfer format, see :ref:`CHDECHBF`. |
-    +---------------------------------+--------------------------------------------+
-    | *mne_fix_mag_coil_types*        | Update the coil types for magnetometers    |
-    |                                 | in a fif file, see :ref:`CHDGAAJC`.        |
-    +---------------------------------+--------------------------------------------+
-    | *mne_fix_stim14*                | Fix coding errors of trigger channel       |
-    |                                 | STI 014, see :ref:`BABCDBDI`.              |
-    +---------------------------------+--------------------------------------------+
-    | *mne_flash_bem*                 | Create BEM tessellation using multi-echo   |
-    |                                 | FLASH MRI data, see :ref:`BABFCDJH`.       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_insert_4D_comp*            | Read Magnes compensation channel data from |
-    |                                 | a text file and merge it with raw data     |
-    |                                 | from other channels in a fif file, see     |
-    |                                 | :ref:`BEHGDDBH`.                           |
-    +---------------------------------+--------------------------------------------+
-    | *mne_list_bem*                  | List BEM information in text format,       |
-    |                                 | see :ref:`BEHBBEHJ`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_list_coil_def*             | Create the coil description file. This     |
-    |                                 | is run automatically when the software     |
-    |                                 | is set up, see :ref:`BJEHHJIJ`.            |
-    +---------------------------------+--------------------------------------------+
-    | *mne_list_proj*                 | List signal-space projection data from a   |
-    |                                 | fif file.                                  |
-    +---------------------------------+--------------------------------------------+
-    | *mne_list_source_space*         | List source space information in text      |
-    |                                 | format suitable for importing into         |
-    |                                 | Neuromag MRIlab, see :ref:`BEHBHIDH`.      |
-    +---------------------------------+--------------------------------------------+
-    | *mne_list_versions*             | List versions and compilation dates of MNE |
-    |                                 | software modules, see :ref:`CHDFIGBG`.     |
-    +---------------------------------+--------------------------------------------+
-    | *mne_make_cor_set*              | Used by mne_setup_mri to create fif format |
-    |                                 | MRI description files from COR or mgh/mgz  |
-    |                                 | format MRI data, see :ref:`BABCCEHF`. The  |
-    |                                 | mne_make_cor_set utility is described      |
-    |                                 | in :ref:`BABBHHHE`.                        |
-    +---------------------------------+--------------------------------------------+
-    | *mne_make_derivations*          | Create a channel derivation data file, see |
-    |                                 | :ref:`CHDHJABJ`.                           |
-    +---------------------------------+--------------------------------------------+
-    | *mne_make_eeg_layout*           | Make a topographical trace layout file     |
-    |                                 | using the EEG electrode locations from     |
-    |                                 | an actual measurement, see :ref:`CHDDGDJA`.|
-    +---------------------------------+--------------------------------------------+
-    | *mne_make_morph_maps*           | Precompute the mapping data needed for     |
-    |                                 | morphing between subjects, see             |
-    |                                 | :ref:`CHDBBHDH`.                           |
-    +---------------------------------+--------------------------------------------+
-    | *mne_make_uniform_stc*          | Create a spatially uniform stc file for    |
-    |                                 | testing purposes.                          |
-    +---------------------------------+--------------------------------------------+
-    | *mne_mark_bad_channels*         | Update the list of unusable channels in    |
-    |                                 | a data file, see :ref:`CHDDHBEE`.          |
-    +---------------------------------+--------------------------------------------+
-    | *mne_morph_labels*              | Morph label file definitions between       |
-    |                                 | subjects, see :ref:`CHDCEAFC`.             |
-    +---------------------------------+--------------------------------------------+
-    | *mne_organize_dicom*            | Organize DICOM MRI image files into        |
-    |                                 | directories, see :ref:`BABEBJHI`.          |
-    +---------------------------------+--------------------------------------------+
-    | *mne_prepare_bem_model*         | Perform the geometry calculations for      |
-    |                                 | BEM forward solutions, see :ref:`CHDJFHEB`.|
-    +---------------------------------+--------------------------------------------+
-    | *mne_process_stc*               | Manipulate stc files.                      |
-    +---------------------------------+--------------------------------------------+
-    | *mne_raw2mat*                   | Convert raw data into a Matlab file,       |
-    |                                 | see :ref:`convert_to_matlab`.              |
-    +---------------------------------+--------------------------------------------+
-    | *mne_rename_channels*           | Change the names and types of channels     |
-    |                                 | in a fif file, see :ref:`CHDCFEAJ`.        |
-    +---------------------------------+--------------------------------------------+
-    | *mne_sensitivity_map*           | Compute a sensitivity map and output       |
-    |                                 | the result in a w-file,                    |
-    |                                 | see :ref:`CHDDCBGI`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_sensor_locations*          | Create a file containing the sensor        |
-    |                                 | locations in text format.                  |
-    +---------------------------------+--------------------------------------------+
-    | *mne_show_fiff*                 | List contents of a fif file,               |
-    |                                 | see :ref:`CHDHEDEF`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_simu*                      | Simulate MEG and EEG data,                 |
-    |                                 | see :ref:`CHDECAFD`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_smooth*                    | Smooth a w or stc file.                    |
-    +---------------------------------+--------------------------------------------+
-    | *mne_surf2bem*                  | Create a *fif* file describing the         |
-    |                                 | triangulated compartment boundaries for    |
-    |                                 | the boundary-element model (BEM),          |
-    |                                 | see :ref:`BEHCACCJ`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_toggle_skips*              | Change data skip tags in a raw file into   |
-    |                                 | ignored skips or vice versa.               |
-    +---------------------------------+--------------------------------------------+
-    | *mne_transform_points*          | Transform between MRI and MEG head         |
-    |                                 | coordinate frames, see :ref:`CHDDDJCA`.    |
-    +---------------------------------+--------------------------------------------+
-    | *mne_tufts2fiff*                | Convert EEG data from the Tufts            |
-    |                                 | University format to fif format,           |
-    |                                 | see :ref:`BEHDGAIJ`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_view_manual*               | Start a PDF reader to show this manual     |
-    |                                 | from its standard location.                |
-    +---------------------------------+--------------------------------------------+
-    | *mne_volume_data2mri*           | Convert volumetric data defined in a       |
-    |                                 | source space created with                  |
-    |                                 | mne_volume_source_space into an MRI        |
-    |                                 | overlay, see :ref:`BEHDEJEC`.              |
-    +---------------------------------+--------------------------------------------+
-    | *mne_volume_source_space*       | Make a volumetric source space,            |
-    |                                 | see :ref:`BJEFEHJI`.                       |
-    +---------------------------------+--------------------------------------------+
-    | *mne_watershed_bem*             | Do the segmentation for BEM using the      |
-    |                                 | watershed algorithm, see :ref:`BABBDHAG`.  |
-    +---------------------------------+--------------------------------------------+
-
-
-File formats
-############
-
-The MNE software employs the fif file format whenever possible.
-New tags have been added to incorporate information specific to
-the calculation of cortically contained source estimates. FreeSurfer
-file formats are also employed when needed to represent cortical
-surface geometry data as well as spatiotemporal distribution of
-quantities on the surfaces. Of particular interest are the w files,
-which contain static overlay data on the cortical surface, and the stc
-files, which contain dynamic overlays (movies).
-
-Conventions
-###########
-
-When command-line examples are shown, the backslash character
-(\\) indicates a continuation line; it is also valid in the shell
-itself. In most cases, however, you can easily fit the commands listed
-in this manual on one line and thus omit the backslashes. The order
-of options is irrelevant. Entries to be typed literally are shown
-like ``this``. *Italicized* text indicates conceptual entries. For
-example, <*dir*> indicates a directory name.
-
-In the description of interactive software modules the notation
-<*menu*>/<*item*> is often used to denote menu selections. For example,
-File/Quit stands for the Quit button in the File menu.
-
-All software modules employ the double-dash (``--``) option convention, *i.e.*, the
-option names are preceded by two dashes.
-
-Most of the programs have two common options to obtain general
-information:
-
-**\---help**
-
-    Prints concise usage information.
-
-**\---version**
-
-    Prints the program module name, version number, and compilation date.
-
-.. _user_environment:
-
-User environment
-################
-
-The system-dependent location of the MNE Software will be
-referred to here by the environment variable MNE_ROOT. There are
-two scripts for setting up the user environment so that the software
-can be used conveniently:
-
-``$MNE_ROOT/bin/mne_setup_sh``
-
-and
-
-``$MNE_ROOT/bin/mne_setup``
-
-compatible with the POSIX and csh/tcsh shells, respectively. Since
-the scripts set environment variables they should be 'sourced' into
-the present shell. You can find out which type of shell you are using
-by saying
-
-``echo $SHELL``
-
-If the output indicates a POSIX shell (bash or sh) you should issue
-the three commands:
-
-``export MNE_ROOT=`` <*MNE*>
-
-``export MATLAB_ROOT=`` <*Matlab*>
-
-``. $MNE_ROOT/bin/mne_setup_sh``
-
-with <*MNE*> replaced by the directory where you have installed the
-MNE software and <*Matlab*> by the directory where Matlab is installed.
-If you do not have Matlab, leave MATLAB_ROOT undefined. If Matlab is
-not available, the utilities mne_convert_mne_data, mne_epochs2mat,
-mne_raw2mat, and mne_simu will not work.
-
-For csh/tcsh the corresponding commands are:
-
-``setenv MNE_ROOT`` <*MNE*>
-
-``setenv MATLAB_ROOT`` <*Matlab*>
-
-``source $MNE_ROOT/bin/mne_setup``
-
-For BEM mesh generation using the watershed algorithm or on the basis
-of multi-echo FLASH MRI data (see :ref:`create_bem_model`), and for
-accessing the tkmedit program from mne_analyze (see :ref:`CACCHCBF`),
-the MNE software needs access to a FreeSurfer license and software.
-Therefore, to use these features it is mandatory that you set up the
-FreeSurfer environment as described in the FreeSurfer documentation.
-
-The environment variables relevant to the MNE software are
-listed in :ref:`CIHDGFAA`.
-
-.. tabularcolumns:: |p{0.3\linewidth}|p{0.55\linewidth}|
-.. _CIHDGFAA:
-.. table:: Environment variables
-
-    +-------------------------+--------------------------------------------+
-    | Name of the variable    |   Description                              |
-    +=========================+============================================+
-    | MNE_ROOT                | Location of the MNE software, see above.   |
-    +-------------------------+--------------------------------------------+
-    | FREESURFER_HOME         | Location of the FreeSurfer software.       |
-    |                         | Needed during FreeSurfer reconstruction    |
-    |                         | and if the FreeSurfer MRI viewer is used   |
-    |                         | with mne_analyze, see :ref:`CACCHCBF`.     |
-    +-------------------------+--------------------------------------------+
-    | SUBJECTS_DIR            | Location of the MRI data.                  |
-    +-------------------------+--------------------------------------------+
-    | SUBJECT                 | Name of the current subject.               |
-    +-------------------------+--------------------------------------------+
-    | MNE_TRIGGER_CH_NAME     | Name of the trigger channel in raw data,   |
-    |                         | see :ref:`BABBGJEA`.                       |
-    +-------------------------+--------------------------------------------+
-    | MNE_TRIGGER_CH_MASK     | Mask to be applied to the trigger channel  |
-    |                         | values, see :ref:`BABBGJEA`.               |
-    +-------------------------+--------------------------------------------+
-
-.. note::
-
-    Section :ref:`setup_martinos` contains information specific to the setup at
-    the Martinos Center including instructions to access the Neuromag software.
diff --git a/doc/source/manual/mne.rst b/doc/source/manual/mne.rst
deleted file mode 100644
index 3e50588..0000000
--- a/doc/source/manual/mne.rst
+++ /dev/null
@@ -1,1323 +0,0 @@
-
-
-.. _ch_mne:
-
-=====================
-The current estimates
-=====================
-
-Overview
-########
-
-This chapter describes the computation of minimum-norm estimates,
-which is accomplished with two programs: *mne_inverse_operator* and
-*mne_make_movie*. The chapter starts with a mathematical description
-of the method, followed by descriptions of the two software modules.
-The interactive program for inspecting data and inverse solutions,
-mne_analyze, is covered in :ref:`ch_interactive_analysis`.
-
-.. _CBBDJFBJ:
-
-Minimum-norm estimates
-######################
-
-This section describes the mathematical details of the calculation
-of minimum-norm estimates. In the Bayesian sense, the ensuing current
-distribution is the maximum a posteriori (MAP) estimate under the
-following assumptions:
-
-- The viable locations of the currents
-  are constrained to the cortex. Optionally, the current orientations
-  can be fixed to be normal to the cortical mantle.
-
-- The amplitudes of the currents have a Gaussian prior distribution
-  with a known source covariance matrix.
-
-- The measured data contain additive noise with a Gaussian distribution with
-  a known covariance matrix. The noise is not correlated over time.
-
-The linear inverse operator
-===========================
-
-The measured data in the source estimation procedure consists
-of MEG and EEG data, recorded on a total of N channels. The task
-is to estimate a total of M strengths of sources located on the
-cortical mantle. If the number of source locations is P, M = P for
-fixed-orientation sources and M = 3P if the source orientations
-are unconstrained. The regularized linear inverse operator following
-from the Bayesian approach is given by the :math:`M \times N` matrix
-
-.. math::    M = R' G^T (G R' G^T + C)^{-1}\ ,
-
-where G is the gain matrix relating the source strengths
-to the measured MEG/EEG data, :math:`C` is the data noise-covariance matrix
-and :math:`R'` is the source covariance matrix.
-The dimensions of these matrices are :math:`N \times M`, :math:`N \times N`,
-and :math:`M \times M`, respectively. The :math:`M \times 1` source-strength
-vector is obtained by multiplying the :math:`N \times 1` data
-vector by :math:`M`.
-
-The expected value of the current amplitudes at time *t* is
-then given by :math:`\hat{j}(t) = Mx(t)`, where :math:`x(t)` is
-a vector containing the measured MEG and EEG data values at time *t*.
-
-.. _CBBHAAJJ:
-
-Regularization
-==============
-
-The a priori variance of the currents is, in practice, unknown.
-We can express this by writing :math:`R' = R/ \lambda^2`,
-which yields the inverse operator
-
-.. math::    M = R G^T (G R G^T + \lambda^2 C)^{-1}\ ,
-
-where the unknown current amplitude is now interpreted in
-terms of the regularization parameter :math:`\lambda^2`.
-A small :math:`\lambda^2` corresponds to large current amplitudes
-and complex estimated current patterns, while a large :math:`\lambda^2`
-means the amplitude of the current is limited and a simpler, smooth
-current estimate is obtained.
-
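-To make the expressions above concrete, the following minimal NumPy
-sketch computes the regularized inverse operator and the expected
-currents for a toy problem (all sizes and names are hypothetical and
-not part of the MNE software)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan, n_src = 60, 300            # N channels, M sources (toy sizes)
-    G = rng.randn(n_chan, n_src)       # gain matrix, N x M
-    R = np.eye(n_src)                  # source covariance, M x M
-    C = np.eye(n_chan)                 # noise covariance, N x N
-    lambda2 = 1.0 / 3.0 ** 2           # lambda^2 = 1 / SNR^2, SNR = 3
-
-    # M = R G^T (G R G^T + lambda^2 C)^{-1}
-    M = R @ G.T @ np.linalg.inv(G @ R @ G.T + lambda2 * C)
-
-    x = rng.randn(n_chan)              # measured data at one time point
-    j_hat = M @ x                      # expected current amplitudes
-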
-We can also arrive at the regularized linear inverse operator
-by minimizing the cost function
-
-.. math::    S = \tilde{e}^T \tilde{e} + \lambda^2 j^T R^{-1} j\ ,
-
-where the first term consists of the difference between the
-whitened measured data (see :ref:`CHDDHAGE`) and those predicted
-by the model while the second term is a weighted-norm of the current
-estimate. It is seen that, with increasing :math:`\lambda^2`,
-the source term receives more weight and a larger discrepancy between
-the measured and predicted data is tolerated.
-
-.. _CHDDHAGE:
-
-Whitening and scaling
-=====================
-
-The MNE software employs data whitening so that a 'whitened' inverse operator
-assumes the form
-
-.. math::    \tilde{M} = R \tilde{G}^T (\tilde{G} R \tilde{G}^T + I)^{-1}\ ,
-
-where :math:`\tilde{G} = C^{-^1/_2}G` is the spatially
-whitened gain matrix. The expected current values are
-:math:`\hat{j}(t) = \tilde{M} \tilde{x}(t)`, where
-:math:`\tilde{x}(t) = C^{-^1/_2}x(t)` is the whitened measurement
-vector at time *t*. The spatial whitening operator is obtained with
-the help of the eigenvalue decomposition
-:math:`C = U_C \Lambda_C^2 U_C^T` as
-:math:`C^{-^1/_2} = \Lambda_C^{-1} U_C^T`.
-In the MNE software the noise-covariance matrix is stored as the
-one applying to raw data. To reflect the decrease of noise due to
-averaging, this matrix, :math:`C_0`, is scaled
-by the number of averages, :math:`L`, *i.e.*, :math:`C = C_0 / L`.
-
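-As an illustration, the whitening operator can be computed from the
-eigenvalue decomposition exactly as stated above; this is a minimal
-NumPy sketch with a toy covariance matrix (names are hypothetical)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan = 8
-    A = rng.randn(n_chan, n_chan)
-    C = A @ A.T + n_chan * np.eye(n_chan)      # toy SPD noise covariance
-
-    # C = U_C Lambda_C^2 U_C^T  =>  C^{-1/2} = Lambda_C^{-1} U_C^T
-    evals, U_C = np.linalg.eigh(C)
-    whitener = np.diag(1.0 / np.sqrt(evals)) @ U_C.T
-
-    # after whitening, the noise covariance becomes the identity
-    assert np.allclose(whitener @ C @ whitener.T, np.eye(n_chan))
-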
-As shown above, regularization of the inverse solution is
-equivalent to a change in the variance of the current amplitudes
-in the Bayesian *a priori* distribution.
-
-A convenient choice for the source-covariance matrix :math:`R` is
-such that :math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1`. With this choice we
-can approximate :math:`\lambda^2 \sim 1/SNR`, where SNR is
-the (power) signal-to-noise ratio of the whitened data.
-
-.. note:: The definition of the signal-to-noise ratio / :math:`\lambda^2`
-   relationship given above works nicely for the whitened forward
-   solution. In the un-whitened case, scaling with the trace ratio
-   :math:`\text{trace}(GRG^T) / \text{trace}(C)` does not make sense,
-   since the diagonal elements summed have, in general, different
-   units of measure. For example, the MEG data are expressed in T or
-   T/m whereas the unit of EEG is Volts.
-
-.. _CBBHEGAB:
-
-Regularization of the noise-covariance matrix
-=============================================
-
-Since a finite amount of data is usually available to compute
-an estimate of the noise-covariance matrix :math:`C`,
-the smallest eigenvalues of its estimate are usually inaccurate
-and smaller than the true eigenvalues. Depending on the seriousness
-of this problem, the following quantities can be affected:
-
-- The model data predicted by the current
-  estimate,
-
-- Estimates of signal-to-noise ratios, which lead to estimates
-  of the required regularization, see :ref:`CBBHAAJJ`,
-
-- The estimated current values, and
-
-- The noise-normalized estimates, see :ref:`CBBEAICH`.
-
-Fortunately, the latter two are the least likely to be affected,
-thanks to the regularization of the estimates. However, in some cases,
-especially the EEG part of the noise-covariance matrix estimate can be
-deficient, *i.e.*, it may possess very small eigenvalues, and thus
-regularization of the noise-covariance matrix is advisable.
-
-The MNE software accomplishes the regularization by replacing
-a noise-covariance matrix estimate :math:`C` with
-
-.. math::    C' = C + \sum_k {\varepsilon_k \bar{\sigma_k}^2 I^{(k)}}\ ,
-
-where the index :math:`k` goes across
-the different channel groups (MEG planar gradiometers, MEG axial
-gradiometers and magnetometers, and EEG), :math:`\varepsilon_k` are
-the corresponding regularization factors, :math:`\bar{\sigma_k}` are
-the average variances across the channel groups, and :math:`I^{(k)}` are
-diagonal matrices containing ones at the positions corresponding
-to the channels contained in each channel group. The values :math:`\varepsilon_k` can
-be adjusted with the regularization options ``--magreg``, ``--gradreg``,
-and ``--eegreg`` specified at the time of the inverse operator
-decomposition, see :ref:`CBBDDBGF`. The convenience script
-mne_do_inverse_operator has the ``--magreg`` and ``--gradreg`` options
-combined into a single option, ``--megreg``, see :ref:`CIHCFJEI`.
-The suggested range of values for :math:`\varepsilon_k` is
-:math:`0.05 \dotso 0.2`.
-
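-A minimal sketch of this regularization in NumPy, assuming the channel
-indices of each group are known (the helper below is hypothetical and
-not part of the MNE software), might look like::
-
-    import numpy as np
-
-    def regularize_noise_cov(C, picks_by_group, eps_by_group):
-        """C' = C + sum_k eps_k * mean_var_k * I_k (toy version)."""
-        C_reg = C.copy()
-        for group, picks in picks_by_group.items():
-            mean_var = C[picks, picks].mean()   # average variance in group
-            C_reg[picks, picks] += eps_by_group[group] * mean_var
-        return C_reg
-
-    C = np.diag(np.random.rand(6) + 0.1)
-    picks = {'grad': [0, 1, 2], 'mag': [3, 4], 'eeg': [5]}
-    eps = {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1}
-    C_reg = regularize_noise_cov(C, picks, eps)
-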
-.. _CHDBEHBC:
-
-Computation of the solution
-===========================
-
-The most straightforward approach to calculate the MNE is
-to employ expression for the original or whitened inverse operator
-directly. However, for computational convenience we prefer to take
-another route, which employs the singular-value decomposition (SVD)
-of the matrix
-
-.. math::    A = \tilde{G} R^{^1/_2} = U \Lambda V^T
-
-where the superscript :math:`^1/_2` indicates a
-square root of :math:`R`. For a diagonal matrix,
-one simply takes the square root of :math:`R` while
-in the more general case one can use the Cholesky factorization :math:`R = R_C R_C^T` and
-thus :math:`R^{^1/_2} = R_C`.
-
-With the above SVD it is easy to show that
-
-.. math::    \tilde{M} = R^{^1/_2} V \Gamma U^T
-
-where the elements of the diagonal matrix :math:`\Gamma` are
-
-.. math::    \gamma_k = \frac{1}{\lambda_k} \frac{\lambda_k^2}{\lambda_k^2 + \lambda^2}\ .
-
-With :math:`w(t) = U^T C^{-^1/_2} x(t)` the expression for
-the expected current is
-
-.. math::    \hat{j}(t) = R_C V \Gamma w(t) = \sum_k {\bar{v_k} \gamma_k w_k(t)}\ ,
-
-where :math:`\bar{v_k} = R_C v_k`, :math:`v_k` being
-the :math:`k` th column of :math:`V`. It is thus seen that the current estimate is
-a weighted sum of the 'modified' eigenleads :math:`\bar{v_k}`.
-
-It is easy to see that :math:`w(t) \propto \sqrt{L}`.
-To maintain the relation
-:math:`\text{trace}(\tilde{G} R \tilde{G}^T) / \text{trace}(I) = 1` when :math:`L` changes
-we must have :math:`R \propto 1/L`. With this approach, :math:`\lambda_k` is
-independent of :math:`L` and, for fixed :math:`\lambda`,
-we see directly that :math:`\hat{j}(t)` is independent
-of :math:`L`.
-
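-The following NumPy fragment illustrates this route for a toy problem,
-assuming the gain matrix has already been whitened (all names are
-hypothetical)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan, n_src = 60, 300
-    G_w = rng.randn(n_chan, n_src)     # whitened gain matrix
-    R_half = np.eye(n_src)             # R^{1/2} (identity for simplicity)
-    lambda2 = 1.0 / 9.0
-
-    # A = G~ R^{1/2} = U Lambda V^T
-    U, lams, Vt = np.linalg.svd(G_w @ R_half, full_matrices=False)
-
-    # gamma_k = (1 / lambda_k) * lambda_k^2 / (lambda_k^2 + lambda^2)
-    gamma = (1.0 / lams) * lams ** 2 / (lams ** 2 + lambda2)
-
-    # M~ = R^{1/2} V Gamma U^T
-    M_tilde = R_half @ (Vt.T * gamma) @ U.T
-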
-.. _CBBEAICH:
-
-Noise normalization
-===================
-
-The noise-normalized linear estimates introduced by Dale
-et al. require division of the expected current amplitude by its
-variance. Noise normalization serves three purposes:
-
-- It converts the expected current value
-  into a dimensionless statistical test variable. Thus the resulting
-  time and location dependent values are often referred to as dynamic
-  statistical parameter maps (dSPM).
-
-- It reduces the location bias of the estimates. In particular,
-  the tendency of the MNE to prefer superficial currents is eliminated.
-
-- The width of the point-spread function becomes less dependent
-  on the source location on the cortical mantle. The point-spread
-  is defined as the MNE resulting from the signals coming from a point
-  current source (a current dipole) located at a certain point on
-  the cortex.
-
-In practice, noise normalization requires the computation
-of the diagonal elements of the matrix
-
-.. math::    M C M^T = \tilde{M} \tilde{M}^T\ .
-
-With help of the singular-value decomposition approach we
-see directly that
-
-.. math::    \tilde{M} \tilde{M}^T\ = \bar{V} \Gamma^2 \bar{V}^T\ .
-
-Under the conditions expressed at the end of :ref:`CHDBEHBC`, it
-follows that the *t*-statistic values associated with fixed-orientation
-sources are proportional to :math:`\sqrt{L}` while the *F*-statistic
-employed with free-orientation sources is proportional to :math:`L`,
-correspondingly.
-
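-In the SVD representation the diagonal elements needed for the
-normalization are cheap to obtain. A minimal NumPy sketch, continuing
-the toy whitened problem above (names are hypothetical)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan, n_src = 60, 300
-    U, lams, Vt = np.linalg.svd(rng.randn(n_chan, n_src),
-                                full_matrices=False)
-    gamma = (1.0 / lams) * lams ** 2 / (lams ** 2 + 1.0 / 9.0)
-
-    V_bar = Vt.T                       # modified eigenleads (R^{1/2} = I)
-    M_tilde = (V_bar * gamma) @ U.T
-
-    # diag(M~ M~^T) = diag(V_bar Gamma^2 V_bar^T), no full matrix needed
-    var = (V_bar ** 2) @ gamma ** 2
-
-    x_w = rng.randn(n_chan)            # whitened data at one time point
-    dspm = (M_tilde @ x_w) / np.sqrt(var)
-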
-.. note:: A section discussing statistical considerations related
-   to the noise normalization procedure will be added to this manual
-   in one of the subsequent releases.
-
-.. note:: The MNE software usually computes the square roots of
-   the F-statistic to be displayed on the inflated cortical surfaces.
-   These are also proportional to :math:`\sqrt{L}`.
-
-.. _CHDCACDC:
-
-Predicted data
-==============
-
-Under noiseless conditions the SNR is infinite and thus leads
-to :math:`\lambda^2 = 0` and the minimum-norm estimate
-explains the measured data perfectly. Under realistic conditions,
-however, :math:`\lambda^2 > 0` and there is a misfit
-between the measured data and those predicted by the MNE. A comparison
-of the predicted data, here denoted by :math:`\hat{x}(t)`,
-with the measured data can give valuable insight into the correctness
-of the regularization applied.
-
-In the SVD approach we easily find
-
-.. math::    \hat{x}(t) = G \hat{j}(t) = C^{^1/_2} U \Pi w(t)\ ,
-
-where the diagonal matrix :math:`\Pi` has
-elements :math:`\pi_k = \lambda_k \gamma_k`. The predicted data are
-thus expressed as the weighted sum of the 'recolored eigenfields' in :math:`C^{^1/_2} U`.
-
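-In the whitened domain the same quantities give the predicted data
-directly; a minimal NumPy sketch (toy sizes, hypothetical names, the
-recoloring by :math:`C^{^1/_2}` is omitted)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan, n_src = 60, 300
-    U, lams, Vt = np.linalg.svd(rng.randn(n_chan, n_src),
-                                full_matrices=False)
-    gamma = (1.0 / lams) * lams ** 2 / (lams ** 2 + 1.0 / 9.0)
-    pi = lams * gamma                  # elements of the diagonal matrix Pi
-
-    x_w = rng.randn(n_chan)            # whitened measurement at time t
-    w_t = U.T @ x_w
-    x_hat_w = U @ (pi * w_t)           # predicted (whitened) data
-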
-.. _CBBDBHDI:
-
-Cortical patch statistics
-=========================
-
-If the ``--cps`` option was used in source space
-creation (see :ref:`CIHCHDAE`) or if mne_add_patch_info described
-in :ref:`BEHCBCGG` was run manually, the source space file
-will contain for each vertex of the cortical surface the information
-about the source space point closest to it as well as the distance
-from the vertex to this source space point. The vertices for which
-a given source space point is the nearest one define the cortical
-patch associated with the source space point. Once these data
-are available, it is straightforward to compute the following cortical
-patch statistics (CPS) for each source location :math:`d`:
-
-- The average over the normals at the
-  vertices in a patch, :math:`\bar{n_d}`,
-
-- The areas of the patches, :math:`A_d`,
-  and
-
-- The average deviation of the vertex normals in a patch from
-  their average, :math:`\sigma_d`, given in degrees.
-
-The orientation constraints
-===========================
-
-The principal sources of MEG and EEG signals are generally
-believed to be postsynaptic currents in the cortical pyramidal neurons.
-Since the net primary current associated with these microscopic
-events is oriented normal to the cortical mantle, it is reasonable
-to use the cortical normal orientation as a constraint in source
-estimation. In addition to allowing completely free source orientations,
-the MNE software implements three orientation constraints based
-on the surface normal data:
-
-- Source orientation can be rigidly fixed
-  to the surface normal direction (the ``--fixed`` option).
-  If cortical patch statistics are available the average normal over
-  each patch, :math:`\bar{n_d}`, are used to define
-  the source orientation. Otherwise, the vertex normal at the source
-  space location is employed.
-
-- A *location independent or fixed loose orientation
-  constraint* (fLOC) can be employed (the ``--loose`` option).
-  In this approach, a source coordinate system based on the local
-  surface orientation at the source location is employed. By default,
-  the three columns of the gain matrix G, associated with a given
-  source location, are the fields of unit dipoles pointing to the
-  directions of the x, y, and z axis of the coordinate system employed
-  in the forward calculation (usually the MEG head coordinate frame).
-  For LOC the orientation is changed so that the first two source
-  components lie in the plane normal to the surface normal at the source
-  location and the third component is aligned with it. Thereafter, the
-  variance of the source components tangential to the cortical surface is
-  reduced by a factor defined by the ``--loose`` option (see the
-  sketch after this list).
-
-- A *variable loose orientation constraint* (vLOC)
-  can be employed (the ``--loosevar`` option). This is similar
-  to fLOC except that the value given with the ``--loosevar`` option
-  will be multiplied by :math:`\sigma_d`, defined above.
-
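-As a concrete illustration, the effect of the fixed loose orientation
-constraint on a diagonal source covariance can be sketched in a few
-lines of NumPy (toy sizes, hypothetical names)::
-
-    import numpy as np
-
-    n_loc = 100                  # P source locations, 3 components each
-    loose = 0.2                  # value given with the --loose option
-
-    R_diag = np.ones(3 * n_loc)
-    R_diag[0::3] = loose         # first tangential component
-    R_diag[1::3] = loose         # second tangential component
-    # R_diag[2::3] stays 1 for the surface-normal component
-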
-.. _CBBDFJIE:
-
-Depth weighting
-===============
-
-The minimum-norm estimates have a bias towards superficial
-currents. This tendency can be alleviated by adjusting the source
-covariance matrix :math:`R` to favor deeper source locations. In the depth
-weighting scheme employed in MNE analyze, the elements of :math:`R` corresponding
-to the :math:`p` th source location are
-scaled by a factor
-
-.. math::    f_p = (g_{1p}^T g_{1p} + g_{2p}^T g_{2p} + g_{3p}^T g_{3p})^{-\gamma}\ ,
-
-where :math:`g_{1p}`, :math:`g_{2p}`, and :math:`g_{3p}` are the three columns
-of :math:`G` corresponding to source location :math:`p` and :math:`\gamma` is
-the order of the depth weighting, specified with the ``--weightexp`` option
-to mne_inverse_operator. The
-maximal amount of depth weighting can be adjusted with the ``--weightlimit`` option.
-
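-A minimal NumPy sketch of this weighting for a free-orientation gain
-matrix (toy data; the capping of the weights is a crude stand-in for
-the ``--weightlimit`` behavior)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    n_chan, n_loc = 60, 100
-    G = rng.randn(n_chan, 3 * n_loc)   # three columns per source location
-    gamma = 0.8                        # --weightexp
-    limit = 10.0                       # --weightlimit
-
-    # f_p = (g_1p^T g_1p + g_2p^T g_2p + g_3p^T g_3p)^(-gamma)
-    sq_norms = (G ** 2).sum(axis=0).reshape(n_loc, 3).sum(axis=1)
-    f = sq_norms ** -gamma
-    f = np.minimum(f, limit * f.min()) # limit the relative strength
-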
-.. _CBBDIJHI:
-
-fMRI-guided estimates
-=====================
-
-The fMRI weighting in MNE software means that the source-covariance matrix
-is modified to favor areas of significant fMRI activation. For this purpose,
-the fMRI activation map is thresholded first at the value defined by
-the ``--fmrithresh`` option to mne_do_inverse_operator or
-mne_inverse_operator. Thereafter, the source-covariance matrix values
-corresponding to the sites under the threshold are multiplied by
-:math:`f_{off}`, set by the ``--fmrioff`` option.
-
-It turns out that the fMRI weighting has a strong influence
-on the MNE but the noise-normalized estimates are much less affected
-by it.
-
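-A minimal NumPy sketch of this thresholding (toy weights; the variable
-names are hypothetical)::
-
-    import numpy as np
-
-    rng = np.random.RandomState(0)
-    prior = rng.rand(100)              # a priori (e.g. fMRI) weights
-    fmri_thresh = 0.5                  # --fmrithresh
-    fmri_off = 0.1                     # --fmrioff
-
-    R_diag = np.ones(100)              # diagonal source covariance
-    R_diag[prior < fmri_thresh] *= fmri_off   # down-weight these sites
-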
-.. _CBBDGIAE:
-
-Effective number of averages
-############################
-
-It is often the case that the epoch to be analyzed is a linear
-combination over conditions rather than one of the original averages
-computed. As stated above, the noise-covariance matrix computed
-is originally one corresponding to raw data. Therefore, it has to
-be scaled correctly to correspond to the actual or effective number
-of epochs in the condition to be analyzed. In general, we have
-
-.. math::    C = C_0 / L_{eff}
-
-where :math:`L_{eff}` is the effective
-number of averages. To calculate :math:`L_{eff}` for
-an arbitrary linear combination of conditions
-
-.. math::    y(t) = \sum_{i = 1}^n {w_i x_i(t)}
-
-we make use of the fact that the noise-covariance matrix
-
-.. math::    C_y = \sum_{i = 1}^n {w_i^2 C_{x_i}} = C_0 \sum_{i = 1}^n {w_i^2 / L_i}
-
-which leads to
-
-.. math::    1 / L_{eff} = \sum_{i = 1}^n {w_i^2 / L_i}
-
-An important special case of the above is a weighted average,
-where
-
-.. math::    w_i = L_i / \sum_{i = 1}^n {L_i}
-
-and, therefore
-
-.. math::    L_{eff} = \sum_{i = 1}^n {L_i}
-
-Instead of a weighted average, one often computes a weighted
-sum, the simplest case being a difference or sum of two categories.
-For a difference :math:`w_1 = 1` and :math:`w_2 = -1` and
-thus
-
-.. math::    1 / L_{eff} = 1 / L_1 + 1 / L_2
-
-or
-
-.. math::    L_{eff} = \frac{L_1 L_2}{L_1 + L_2}
-
-Interestingly, the same holds for a sum, where :math:`w_1 = w_2 = 1`.
-Generalizing, for any combination of sums and differences, where :math:`w_i = 1` or :math:`w_i = -1`, :math:`i = 1 \dotso n`,
-we have
-
-.. math::    1 / L_{eff} = \sum_{i = 1}^n {1/{L_i}}
-
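-The relations above are easy to evaluate; for example, a small Python
-helper (hypothetical, not part of the MNE software) for an arbitrary
-linear combination could be::
-
-    def effective_nave(weights, naves):
-        """1 / L_eff = sum_i w_i^2 / L_i."""
-        return 1.0 / sum(w ** 2 / L for w, L in zip(weights, naves))
-
-    # difference of two conditions with 100 and 50 averages:
-    # L_eff = L1 L2 / (L1 + L2) = 100 * 50 / 150
-    print(effective_nave([1, -1], [100, 50]))   # 33.33...
-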
-.. _CBBDDBGF:
-
-Inverse-operator decomposition
-##############################
-
-The program ``mne_inverse_operator`` calculates
-the decomposition :math:`A = \tilde{G} R^{^1/_2} = U \Lambda V^T`,
-described in :ref:`CHDBEHBC`. It is normally invoked from the
-convenience script ``mne_do_inverse_operator``. This section describes
-the options to ``mne_inverse_operator`` should a user need
-to invoke it directly for special-purpose processing.
-
-The command-line options of ``mne_inverse_operator`` are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---meg**
-
-    Employ MEG data in the calculation of the estimates.
-
-**\---eeg**
-
-    Employ EEG data in the calculation of the estimates. Note: The EEG
-    computations have not been thoroughly tested at this time.
-
-**\---fixed**
-
-    Use fixed source orientations normal to the cortical mantle. By default,
-    the source orientations are not constrained.
-
-**\---loose <*amount*>**
-
-    Employ a loose orientation constraint (LOC). This means that the source
-    covariance matrix entries corresponding to the current component
-    normal to the cortex are set equal to one and the transverse components
    are set to <*amount*>. The recommended
-    value of <*amount*> is 0.2...0.6.
-
-**\---loosevar <*amount*>**
-
-    Use an adaptive loose orientation constraint. This option can
-    only be employed if the source spaces included in the forward solution
-    have the patch information computed, see :ref:`CIHCHDAE`.
-
-**\---fwd <*name*>**
-
-    Specifies the name of the forward solution to use.
-
-**\---noisecov <*name*>**
-
-    Specifies the name of the noise-covariance matrix to use. If this
-    file contains a projection operator, attached by mne_browse_raw and mne_process_raw,
-    no additional projection vectors can be added with the ``--proj`` option. For
-    backward compatibility, ``--senscov`` can be used as a synonym for ``--noisecov``.
-
-**\---noiserank <*value*>**
-
-    Specifies the rank of the noise covariance matrix explicitly rather than
-    trying to reduce it automatically. This option is seldom needed.
-
-**\---gradreg <*value*>**
-
-    Regularize the planar gradiometer section (channels for which the unit
-    of measurement is T/m) of the noise-covariance matrix by the given
-    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
-
-**\---magreg <*value*>**
-
-    Regularize the magnetometer and axial gradiometer section (channels
-    for which the unit of measurement is T) of the noise-covariance matrix
-    by the given amount. The value is restricted to the range 0...1.
-    For details, see :ref:`CBBHEGAB`.
-
-**\---eegreg <*value*>**
-
-    Regularize the EEG section of the noise-covariance matrix by the given
-    amount. The value is restricted to the range 0...1. For details, see :ref:`CBBHEGAB`.
-
-**\---diagnoise**
-
-    Omit the off-diagonal terms from the noise-covariance matrix in
-    the computations. This may be useful if the amount of signal-free
-    data has been insufficient to calculate a reliable estimate of the
-    full noise-covariance matrix.
-
-**\---srccov <*name*>**
-
-    Specifies the name of the diagonal source-covariance matrix to use.
-    By default the source covariance matrix is a multiple of the identity matrix.
-    This option can be employed to incorporate the fMRI constraint.
-    The software to create a source-covariance matrix file from fMRI
-    data will be provided in a future release of this software package.
-
-**\---depth**
-
-    Employ depth weighting. For details, see :ref:`CBBDFJIE`.
-
-**\---weightexp <*value*>**
-
-    This parameter determines the steepness of the depth weighting function
-    (default = 0.8). For details, see :ref:`CBBDFJIE`.
-
-**\---weightlimit <*value*>**
-
-    Maximum relative strength of the depth weighting (default = 10). For
-    details, see :ref:`CBBDFJIE`.
-
-**\---fmri <*name*>**
-
-    With the help of this w file, an *a priori* weighting
-    can be applied to the source covariance matrix. The source of the
-    weighting is usually fMRI but may also be some other data, provided
-    that the weighting can be expressed as a scalar value on the cortical
-    surface, stored in a w file. It is recommended that this w file
-    is appropriately smoothed (see :ref:`CHDEBAHH`) in mne_analyze,
-    tksurfer, or with mne_smooth_w so that it contains
-    nonzero values at all vertices of the triangular tessellation of
-    the cortical surface. The name of the file given is used as a stem of
-    the w files. The actual files should be called <*name*> ``-lh.pri`` and <*name*> ``-rh.pri`` for
-    the left and right hemisphere weight files, respectively. The application
-    of the weighting is discussed in :ref:`CBBDIJHI`.
-
-**\---fmrithresh <*value*>**
-
-    This option is mandatory if a weighting function has been specified
-    with the ``--fmri`` option, and has no effect otherwise. If the value
-    in the *a priori* files falls below this threshold
-    at a particular source space point, the source covariance matrix
-    values are multiplied by the value specified with the ``--fmrioff`` option
-    (default 0.1). Otherwise they are left unchanged.
-
-**\---fmrioff <*value*>**
-
-    The value by which the source covariance elements are multiplied
-    if the *a priori* weight falls below the threshold
-    set with ``--fmrithresh`` , see above.
-
-**\---bad <*name*>**
-
-    A text file to designate bad channels, listed one channel name on each
-    line of the file. If the noise-covariance matrix specified with the ``--noisecov`` option
-    contains projections, bad channel lists can be included only if
-    they specify all channels containing non-zero entries in a projection
-    vector. For example, bad channels can usually specify all magnetometers
-    or all gradiometers since the projection vectors for these channel
-    types are completely separate. Similarly, it is possible to include
-    MEG data only or EEG data only by using only one of ``--meg`` or ``--eeg`` options
-    since the projection vectors for MEG and EEG are always separate.
-
-**\---surfsrc**
-
-    Use a source coordinate system based on the local surface orientation
-    at the source location. By default, the three dipole components are
-    pointing to the directions of the x, y, and z axis of the coordinate system
-    employed in the forward calculation (usually the MEG head coordinate
-    frame). This option changes the orientation so that the first two
-    source components lie in the plane normal to the surface normal
-    at the source location and the third component is aligned with it.
-    If patch information is available in the source space, the normal
-    is the average patch normal, otherwise the vertex normal at the source
-    location is used. If the ``--loose`` or ``--loosevar`` option
-    is employed, ``--surfsrc`` is implied.
-
-**\---exclude <*name*>**
-
-    Exclude the source space points defined by the given FreeSurfer 'label' file
-    from the source reconstruction. This is accomplished by setting
-    the corresponding entries in the source-covariance matrix equal
-    to zero. The name of the file should end with ``-lh.label``
-    if it refers to the left hemisphere and with ``-rh.label`` if
-    it lists points in the right hemisphere.
-
-**\---proj <*name*>**
-
-    Include signal-space projection (SSP) information from this file. For information
-    on SSP, see :ref:`CACCHABI`. If the projections are present in
-    the noise-covariance matrix, the ``--proj`` option is
-    not allowed.
-
-**\---csd**
-
-    Compute the inverse operator for surface current densities instead
-    of the dipole source amplitudes. This requires the computation of patch
-    statistics for the source space. Since this computation is time consuming,
-    it is recommended that the patch statistics are precomputed and
-    the source space file containing the patch information is employed
-    already when the forward solution is computed, see :ref:`CIHCHDAE` and :ref:`BABCHEJD`.
-    For technical details of the patch information, please consult :ref:`CBBDBHDI`. This option is considered experimental at
-    the moment.
-
-**\---inv <*name*>**
-
-    Save the inverse operator decomposition here.
-
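-As an example, a typical direct invocation combining the options above
-might look like the following (the file names are hypothetical)::
-
-    mne_inverse_operator --fwd sample-fwd.fif --noisecov sample-cov.fif \
-        --meg --eeg --loose 0.2 --depth --inv sample-inv.fif
-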
-.. _CBBECEDE:
-
-Producing movies and snapshots
-##############################
-
-mne_make_movie is a program
-for producing movies and snapshot graphics frames without any graphics
-output to the screen. In addition, mne_make_movie can
-produce stc or w files which contain the numerical current estimate
-data in a simple binary format for postprocessing. These files can
-be displayed in mne_analyze,
-see :ref:`ch_interactive_analysis`, utilized in the cross-subject averaging
-process, see :ref:`ch_morph`, and read into Matlab using the MNE
-Matlab toolbox, see :ref:`ch_matlab`.
-
-The command-line options to mne_make_movie are
-explained in the following subsections.
-
-General options
-===============
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-Input files
-===========
-
-**\---inv <*name*>**
-
-    Load the inverse operator decomposition from here.
-
-**\---meas <*name*>**
-
-    Load the MEG or EEG data from this file.
-
-**\---set <*number*>**
-
-    The data set (condition) number to load. This is the sequential
-    number of the condition. You can easily see the association by looking
-    at the condition list in mne_analyze when
-    you load the file.
-
-**\---stcin <*name*>**
-
-    Specifies an stc file to read as input.
-
-Times and baseline
-==================
-
-**\---tmin <*time/ms*>**
-
-    Specifies the starting time employed in the analysis. If the ``--tmin``
-    option is missing, the analysis starts from the beginning of the epoch.
-
-**\---tmax <*time/ms*>**
-
-    Specifies the finishing time employed in the analysis. If the ``--tmax``
-    option is missing, the analysis extends to the end of the epoch.
-
-**\---tstep <*step/ms*>**
-
-    Time step between consecutive movie frames, specified in milliseconds.
-
-**\---integ <*:math:`\Delta`t/ms*>**
-
-    Integration time for each frame. Defaults to zero. The integration will
-    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
-    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
-
-**\---pick <*time/ms*>**
-
-    Pick a time for the production of rgb, tif, jpg, png, or w files.
-    Several pick options may be present. The time must be within the
-    analysis interval, indicated by the ``--tmin`` and ``--tmax`` options.
-    The ``--rgb`` , ``--tif`` , ``--jpg`` , ``--png`` , and ``--w`` options
-    control which file types are actually produced. When a ``--pick`` option
-    is encountered, the effect of any preceding ``--pickrange`` option
-    is ignored.
-
-**\---pickrange**
-
-    All previous ``--pick`` options will be ignored. Instead,
-    snapshots are produced as indicated by the ``--tmin`` , ``--tmax`` ,
-    and ``--tstep`` options. This is useful, *e.g.*,
-    for producing input for scripts merging the individual graphics
-    snapshots into a composite "filmstrip" representation.
-    However, such scripts are not yet part of the MNE software.
-
-**\---bmin <*time/ms*>**
-
-    Specifies the starting time of the baseline. In order to activate
-    baseline correction, both ``--bmin`` and ``--bmax`` options
-    must be present.
-
-**\---bmax <*time/ms*>**
-
-    Specifies the finishing time of the baseline.
-
-**\---baselines <*file_name*>**
-
-    Specifies a file which contains the baseline settings. Each line
-    of the file should contain a name of a channel, followed by the
-    baseline value, separated from the channel name by a colon. The
-    baseline values must be specified in basic units, i.e., Teslas/meter
-    for gradiometers, Teslas for magnetometers, and Volts for EEG channels.
-    If some channels are missing from the baseline file, warning messages are
-    issued: for these channels, the ``--bmin`` and ``--bmax`` settings will
-    be used.
-
-Options controlling the estimates
-=================================
-
-**\---nave <*value*>**
-
-    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
-    as discussed in :ref:`CBBDGIAE`. If the input data file is
-    one produced by mne_browse_raw or mne_process_raw, the
-    number of averages is correct in the file. However, if subtractions
-    or some more complicated combinations of simple averages are produced,
-    e.g., by using the xplotter software,
-    the number of averages should be manually adjusted along the guidelines
-    given in :ref:`CBBDGIAE`. This is accomplished either by
-    employing this flag or by adjusting the number of averages in the
-    data file with the help of the utility mne_change_nave.
-
-**\---snr <*value*>**
-
-    An estimate for the amplitude SNR. The regularization parameter will
-    be set as :math:`\lambda^2 = 1/SNR^2`. The default value is
-    SNR = 3. Automatic selection of the regularization parameter is
-    currently not supported.
-
-**\---spm**
-
-    Calculate the dSPM instead of the expected current value.
-
-**\---sLORETA**
-
-    Calculate the noise-normalized estimate using the sLORETA approach.
-    sLORETA solutions have in general a smaller location bias than either
-    the expected current (MNE) or the dSPM.
-
-**\---signed**
-
-    Indicate the current direction with respect to the cortex outer
-    normal by sign. Currents flowing out of the cortex are thus considered
-    positive (warm colors) and currents flowing into the cortex negative (cold
-    colors).
-
-**\---picknormalcomp**
-
-    The components of the estimates corresponding to directions tangential
-    with the cortical mantle are zeroed out.
-
-.. _CBBBBHIF:
-
-Visualization options
-=====================
-
-**\---subject <*subject*>**
-
-    Specifies the subject whose MRI data is employed in the visualization.
-    This must be the same subject that was used for computing the current
-    estimates. The environment variable SUBJECTS_DIR must be set to
-    point to the location where the subjects are to be found.
-
-**\---morph <*subject*>**
-
-    Morph the data to the cortical surface of another subject. The QuickTime
-    movie, stc-file, graphics snapshot, and w-file outputs are affected
-    by this option, *i.e.*, they will take the morphing
-    into account and will represent the data on the cortical surface
-    of the subject defined with this option. The stc files morphed to
-    a single subject's cortical surface are used by mne_average_estimates to
-    combine data from different subjects, see :ref:`CHDFDIFE`.
-    If morphing is selected, appropriate smoothing must be specified
-    with the ``--smooth`` option. The morphing process can
-    be made faster by precomputing the necessary morphing maps with
-    mne_make_morph_maps,
-    see :ref:`CHDBBHDH`. More information about morphing and averaging
-    can be found in :ref:`ch_morph`.
-
-**\---morphgrade <*number*>**
-
-    Adjusts the number of vertices in the stc files produced when morphing
-    is in effect. By default the number of vertices is 10242 corresponding
-    to --morphgrade value 5. Allowed values are 3, 4, 5, and 6 corresponding
-    to 642, 2562, 10242, and 40962 vertices, respectively.
-
-**\---surface <*surface name*>**
-
-    Name of the surface employed in the visualization. The default is inflated.
-
-**\---curv <*name*>**
-
-    Specify a nonstandard curvature file name. The default curvature files
-    are ``lh.curv`` and ``rh.curv``. With this option,
-    the names become ``lh.`` <*name*> and ``rh.`` <*name*>.
-
-**\---patch <*name*> [: <*angle/deg*> ]**
-
-    Specify the name of a surface patch to be used for visualization instead
-    of the complete cortical surface. A complete name of a patch file
-    in the FreeSurfer surf directory must be given. The name should
-    begin with lh or rh to allow association of the patch with a hemisphere.
-    A maximum of two ``--patch`` options can be in effect, one patch for each
-    hemisphere. If the name refers to a flat patch, the name can be
-    optionally followed by a colon and a rotation angle in degrees.
-    The flat patch will be then rotated counterclockwise by this amount
-    before display. You can check a suitable value for the rotation
-    angle by loading the patch interactively in mne_analyze .
-
-**\---width <*value*>**
-
-    Width of the graphics output frames in pixels. The default width
-    is 600 pixels.
-
-**\---height <*value*>**
-
-    Height of the graphics output frames in pixels. The default height
-    is 400 pixels.
-
-**\---mag <*factor*>**
-
-    Magnify the visualized scene by this factor.
-
-**\---lh**
-
-    Select the left hemisphere for graphics output. By default, both hemispheres
-    are processed.
-
-**\---rh**
-
-    Select the right hemisphere for graphics output. By default, both hemispheres
-    are processed.
-
-**\---view <*name*>**
-
-    Select the name of the view for mov, rgb, and tif graphics output files.
-    The default viewnames, defined in ``$MNE_ROOT/share/mne/mne_analyze/eyes``,
-    are *lat* (lateral), *med* (medial), *ven* (ventral),
-    and *occ* (occipital). You can override these
-    defaults by creating the directory .mne under your home directory
-    and copying the eyes file there. Each line of the eyes file contains
-    the name of the view, the viewpoint for the left hemisphere, the
-    viewpoint for the right hemisphere, left hemisphere up vector, and
-    right hemisphere up vector. The entities are separated by semicolons.
-    Lines beginning with the pound sign (#) are considered to be comments.
-
-**\---smooth <*nstep*>**
-
-    Number of smoothsteps to take when producing the output frames. Depending
-    on the source space decimation, an appropriate number is 4 - 7.
-    Smoothing does not have any effect for the original brain if stc
-    files are produced. However, if morphing is selected smoothing is
-    mandatory even with stc output. For details of the smoothing procedure,
-    see :ref:`CHDEBAHH`.
-
-**\---nocomments**
-
-    Do not include the comments in the image output files or movies.
-
-**\---noscalebar**
-
-    Do not include the scalebar in the image output files or movies.
-
-**\---alpha <*value*>**
-
-    Adjust the opacity of maps shown on the cortical surface (0 = transparent,
-    1 = totally opaque). The default value is 1.
-
-Thresholding
-============
-
-**\---fthresh <*value*>**
-
-    Specifies the threshold for the displayed colormaps. At the threshold,
-    the overlayed color will be equal to the background surface color.
-    For currents, the value will be multiplied by :math:`10^{-10}`.
-    The default value is 8.
-
-**\---fmid <*value*>**
-
-    Specifies the midpoint for the displayed colormaps. At this value, the
-    overlayed color will be red (positive values) or blue (negative values).
-    For currents, the value will be multiplied by :math:`10^{-10}`.
-    The default value is 15.
-
-**\---fmax <*value*>**
-
-    Specifies the maximum point for the displayed colormaps. At this value,
-    the overlayed color will be bright yellow (positive values) or light
-    blue (negative values). For currents, the value will be multiplied
-    by :math:`10^{-10}`. The default value is 20.
-
-**\---fslope <*value*>**
-
-    Included for backwards compatibility. If this option is specified
-    and ``--fmax`` option is *not* specified, :math:`F_{max} = F_{mid} + 1/F_{slope}`.
-
-Output files
-============
-
-**\---mov <*name*>**
-
-    Produce QuickTime movie files. This is the 'stem' of
-    the output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. The name of the view is indicated with ``-`` <*viewname*>.
-    Finally, ``.mov`` is added to indicate a QuickTime output
-    file. The movie is produced for all times as dictated by the
-    ``--tmin``, ``--tmax``, ``--tstep``, and ``--integ`` options.
-
-**\---qual <*value*>**
-
-    Quality of the QuickTime movie output. The default quality is 80 and
-    the allowed range is 25 - 100. The size of the movie files is a
-    monotonically increasing function of the movie quality.
-
-**\---rate <*rate*>**
-
-    Specifies the frame rate of the QuickTime movies. The default value is :math:`1/(10t_{step})`,
-    where :math:`t_{step}` is the time, in seconds, between subsequent
-    movie frames produced.
-
-**\---rgb <*name*>**
-
-    Produce rgb snapshots. This is the 'stem' of the
-    output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. The name of the view is indicated with ``-`` <*viewname*>.
-    Finally, ``.rgb`` is added to indicate an rgb output file.
-    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
-
-**\---tif <*name*>**
-
-    Produce tif snapshots. This is the 'stem' of the
-    output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. The name of the view is indicated with ``-`` <*viewname*>.
-    Finally, ``.tif`` is added to indicate a tif output file.
-    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
-    The tif output files are *not* compressed. Pass
-    the files through an image processing program to compress them.
-
-**\---jpg <*name*>**
-
-    Produce jpg snapshots. This is the 'stem' of the
-    output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. The name of the view is indicated with ``-`` <*viewname*>.
-    Finally, ``.jpg`` is added to indicate a jpg output file.
-    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
-
-**\---png <*name*>**
-
-    Produce png snapshots. This is the 'stem' of the
-    output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh`` or ``-rh`` is
-    then appended. The name of the view is indicated with ``-`` <*viewname*>.
-    Finally, ``.png`` is added to indicate a png output file.
-    Files are produced for all picked times as dictated by the ``--pick`` and ``--integ`` options.
-
-**\---w <*name*>**
-
-    Produce w file snapshots. This is the 'stem' of
-    the output file name. The actual name is derived by stripping anything
-    up to and including the last period from the end of <*name*>.
-    According to the hemisphere, ``-lh.w`` or ``-rh.w``
-    is then appended. Files are produced for all picked times as dictated
-    by the ``--pick`` and ``--integ`` options.
-
-**\---stc <*name*>**
-
-    Produce stc files for either the original subject or the one selected with
-    the ``--morph`` option. These files will contain data only
-    for the decimated locations. If morphing is selected, appropriate
-    smoothing is mandatory. The morphed maps will be decimated with the
-    help of a subdivided icosahedron so that the morphed stc files will
-    always contain 10242 vertices. These morphed stc files can be easily
-    averaged together, e.g., in Matlab since they always contain an
-    identical set of vertices.
-
-**\---norm <*name*>**
-
-    Indicates that a separate w file containing the noise-normalization
-    values will be produced. The option ``--spm`` must also be present.
-    Nevertheless, the movies and stc files output will contain MNE values.
-    The noise normalization data files will be called
-    <*name*>- <*SNR*> ``-lh.w`` and <*name*>- <*SNR*> ``-rh.w``.
-
-.. _CBBHHCEF:
-
-Label processing
-================
-
-**\---label <*name*>**
-
-    Specifies a label file to process. For each label file, the values
-    of the computed estimates are listed in text files. The label files
-    are produced by tksurfer or mne_analyze and
-    specify regions of interests (ROIs). A label file name should end
-    with ``-lh.label`` for left-hemisphere ROIs and with ``-rh.label`` for
-    right-hemisphere ones. The corresponding output files are tagged
-    with ``-lh-`` <*data type*> ``.amp`` and ``-rh-`` <*data type*> ``.amp``, respectively. <*data type*> equals ``mne`` for
-    expected current data and ``spm`` for
-    dSPM data. Each line of the output file contains the waveform of
-    the output quantity at one of the source locations falling inside
-    the ROI. For more information about the label output formats, see :ref:`CACJJGFA`.
-
-**\---labelcoords**
-
-    Include coordinates of the vertices in the output. The coordinates will
-    be listed in millimeters in the coordinate system which was specified
-    for the forward model computations. This option cannot be used with
-    stc input files (``--stcin`` ) because the stc files do
-    not contain the coordinates of the vertices.
-
-**\---labelverts**
-
-    Include vertex numbers in the output. The numbers refer to the complete
-    triangulation of the corresponding surface and are zero based. The
-    vertex numbers are by default on the first row or first column of the
-    output file depending on whether or not the ``--labeltimebytime`` option
-    is present.
-
-**\---labeltimebytime**
-
-    Output the label data time by time instead of the default vertex-by-vertex
-    output.
-
-**\---labeltag <*tag*>**
-
-    End the output files with the specified tag. By default, the output files
-    will end with ``-mne.amp`` or ``-spm.amp`` depending
-    on whether MNE or one of the noise-normalized estimates (dSPM or sLORETA)
-    was selected.
-
-**\---labeloutdir <*directory*>**
-
-    Specifies the directory where the output files will be located.
-    By default, they will be in the current working directory.
-
-**\---labelcomments**
-
-    Include comments in the output files. The comment lines begin with the
-    percent sign to make the files compatible with Matlab.
-
-**\---scaleby <*factor*>**
-
-    By default, the current values output to the files will be in the
-    actual physical units (Am). This option allows scaling of the current
-    values to other units. mne_analyze typically
-    uses 1e10 to bring the numbers to a human-friendly scale.
-
-Using stc file input
-====================
-
-The ``--stcin`` option allows input of stc files.
-This feature has several uses:
-
-- QuickTime movies can be produced from
-  existing stc files without having to resort to EasyMeg.
-
-- Graphics snapshots can be produced from existing stc files.
-
-- Existing stc files can be temporally resampled with the help of
-  the ``--tmin`` , ``--tmax`` , ``--tstep`` ,
-  and ``--integ`` options.
-
-- Existing stc files can be morphed to another cortical surface
-  by specifying the ``--morph`` option.
-
-- Time courses can be extracted and stored into text files with
-  the help of the ``--label`` options, see above.
-
-.. _CBBCGHAH:
-
-Computing inverse from raw and evoked data
-##########################################
-
-The purpose of the utility mne_compute_raw_inverse is
-to compute inverse solutions from either evoked-response or raw
-data at specified ROIs (labels) and to save the results in a fif
-file which can be viewed with mne_browse_raw,
-read into Matlab directly using the MNE Matlab Toolbox, see :ref:`ch_matlab`,
-or converted to Matlab format using either mne_convert_mne_data, mne_raw2mat,
-or mne_epochs2mat, see :ref:`ch_convert`.
-
-.. _CHDEIHFA:
-
-Command-line options
-====================
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in <*filename*>**
-
-    Specifies the input data file. This can be either an evoked data
-    file or a raw data file.
-
-**\---bmin <*time/ms*>**
-
-    Specifies the starting time of the baseline. In order to activate
-    baseline correction, both ``--bmin`` and ``--bmax`` options
-    must be present. This option applies to evoked data only.
-
-**\---bmax <*time/ms*>**
-
-    Specifies the finishing time of the baseline. This option applies
-    to evoked data only.
-
-**\---set <*number*>**
-
-    The data set (condition) number to load. This is the sequential
-    number of the condition. You can easily see the association by looking
-    at the condition list in mne_analyze when
-    you load the file.
-
-**\---inv <*name*>**
-
-    Load the inverse operator decomposition from here.
-
-**\---nave <*value*>**
-
-    Specifies the effective number of averaged epochs in the input data, :math:`L_{eff}`,
-    as discussed in :ref:`CBBDGIAE`. If the input data file is
-    one produced by mne_browse_raw or mne_process_raw ,
-    the number of averages is correct in the file. However, if subtractions
-    or some more complicated combinations of simple averages are produced,
-    e.g., by  using the xplotter software,
-    the number of averages should be manually adjusted along the guidelines
-    given in :ref:`CBBDGIAE`. This is accomplished either by
-    employing this flag or by adjusting the number of averages in the
-    data file with help of the utility mne_change_nave .
-
-**\---snr <*value*>**
-
-    An estimate for the amplitude SNR. The regularization parameter will
-    be set as :math:`\lambda^2 = 1/SNR^2`. The default value is
-    SNR = 1. Automatic selection of the regularization parameter is
-    currently not supported.
-
-**\---spm**
-
-    Calculate the dSPM instead of the expected current value.
-
-**\---picknormalcomp**
-
-    The components of the estimates corresponding to directions tangential
-    with the cortical mantle are zeroed out.
-
-**\---mricoord**
-
-    Provide source locations and orientations in the MRI coordinate frame
-    instead of the default head coordinate frame.
-
-**\---label <*name*>**
-
-    Specifies a label file to process. For each label file, the values
-    of the computed estimates are stored in a fif file. For more details,
-    see :ref:`CBBHJDAI`. The label files are produced by tksurfer
-    or mne_analyze and specify regions
-    of interests (ROIs). A label file name should end with ``-lh.label`` for
-    left-hemisphere ROIs and with ``-rh.label`` for right-hemisphere
-    ones. The corresponding output files are tagged with ``-lh-`` <*data type*> ``.fif`` and ``-rh-`` <*data type*> ``.fif``, respectively. <*data type*> equals ``mne`` for expected
-    current data and ``spm`` for dSPM data.
-    For raw data, ``_raw.fif`` is employed instead of ``.fif``.
-    The output files are stored in the same directory as the label files.
-
-**\---labelselout**
-
-    Produces additional label files for each label processed, containing only
-    those vertices within the input label which correspond to available
-    source space vertices in the inverse operator. These files have the
-    same name as the original label except that ``-lh`` and ``-rh`` are replaced
-    by ``-sel-lh`` and ``-sel-rh`` , respectively.
-
-**\---align_z**
-
-    Instructs the program to try to align the waveform signs within
-    the label. For more information, see :ref:`CBBHJDAI`. This
-    flag will not have any effect if the inverse operator has been computed
-    with the strict orientation constraint active.
-
-**\---labeldir <*directory*>**
-
-    All previous ``--label`` options will be ignored when this
-    option is encountered. For each label in the directory, the output
-    file defined with the ``--out`` option will contain a summarizing
-    waveform which is the average of the waveforms in the vertices of
-    the label. The ``--labeldir`` option implies ``--align_z`` and ``--picknormalcomp`` options.
-
-**\---orignames**
-
-    This option is used with the ``--labeldir`` option, above.
-    With this option, the output file channel names will be the names
-    of the label files, truncated to 15 characters, instead of names
-    containing the vertex numbers.
-
-**\---out <*name*>**
-
-    Required with ``--labeldir`` . This is the output file for
-    the data.
-
-**\---extra <*name*>**
-
-    By default, the output includes the current estimate signals and
-    the digital trigger channel, see ``--digtrig`` option,
-    below. With the ``--extra`` option, a custom set of additional
-    channels can be included. The extra channel text file should contain
-    the names of these channels, one channel name on each line. With
-    this option present, the digital trigger channel is not included
-    unless specified in the extra channel file.
-
-**\---noextra**
-
-    No additional channels will be included with this option present.
-
-**\---digtrig <*name*>**
-
-    Name of the composite digital trigger channel. The default value
-    is 'STI 014'. Underscores in the channel name
-    will be replaced by spaces.
-
-**\---split <*size/MB*>**
-
-    Specifies the maximum size of the raw data files saved. By default, the
-    output is split into files which are just below 2 GB so that the
-    fif file maximum size is not exceeded.
-
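-As an example, a hypothetical invocation (all file and label names are
-placeholders) computing dSPM waveforms for one ROI from the first
-condition of an evoked data file could look like::
-
-    mne_compute_raw_inverse --in sample-ave.fif --set 1 \
-        --inv sample-meg-inv.fif --label aud-lh.label --spm --align_z
-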
-.. note:: The digital trigger channel can also be set with the MNE_TRIGGER_CH_NAME environment variable. Underscores in the variable value will *not* be replaced with spaces by mne_compute_raw_inverse. Using the ``--digtrig`` option supersedes the MNE_TRIGGER_CH_NAME environment variable.
-
-.. _CBBHJDAI:
-
-Implementation details
-======================
-
-The fif files output from mne_compute_raw_inverse have
-various fields of the channel information set to facilitate interpretation
-by postprocessing software as follows:
-
-**channel name**
-
-    Will be set to J[xyz] <*number*>,
-    where the source component is indicated by the coordinate axis name
-    and the number is the vertex number, starting from zero, in the complete
-    triangulation of the hemisphere in question.
-
-**logical channel number**
-
-    Will be set to the vertex number, starting from zero, in the
-    complete triangulation of the hemisphere in question.
-
-**sensor location**
-
-    The location of the vertex in head coordinates or in MRI coordinates,
-    determined by the ``--mricoord`` flag.
-
-**sensor orientation**
-
-    The *x*-direction unit vector will point to the
-    direction of the current. Other unit vectors are set to zero. Again,
-    the coordinate system in which the orientation is expressed depends
-    on the ``--mricoord`` flag.
-
-The ``--align_z`` flag tries to align the signs
-of the signals at different vertices of the label. For this purpose,
-the surface normals within the label are collected into a :math:`n_{vert} \times 3` matrix.
-The preferred orientation will be taken as the first right singular
-vector of this matrix, corresponding to its largest singular value.
-If the dot product of the surface normal of a vertex with this preferred
-orientation is negative, the sign of the estimates at this vertex is
-inverted. The inversion is reflected in the current direction vector
-listed in the channel information, see above.
-
-.. note:: The raw data files output by mne_compute_raw_inverse can be converted to mat files with mne_raw2mat, see :ref:`convert_to_matlab`. Alternatively, the files can be read directly from Matlab using the routines in the MNE Matlab toolbox, see :ref:`ch_matlab`. The evoked data output can be easily read directly from Matlab using the fiff_load_evoked routine in the MNE Matlab toolbox. Both raw data and evoked output files can be loaded into mne_browse_raw, see :ref:`ch_browse`.
diff --git a/doc/source/manual/morph.rst b/doc/source/manual/morph.rst
deleted file mode 100644
index 25b3c6c..0000000
--- a/doc/source/manual/morph.rst
+++ /dev/null
@@ -1,409 +0,0 @@
-
-
-.. _ch_morph:
-
-======================
-Morphing and averaging
-======================
-
-Overview
-########
-
-The spherical morphing of the surfaces accomplished by FreeSurfer can be
-employed to bring data from different subjects into a common anatomical
-frame. This chapter describes utilities which make use of the spherical morphing
-procedure. mne_morph_labels morphs
-label files between subjects, allowing the definition of labels in
-one brain and transforming them to anatomically analogous labels
-in another. mne_average_estimates offers
-the capability to compute averages of data computed with the MNE software
-across subjects.
-
-.. _CHDJDHII:
-
-The morphing maps
-#################
-
-The MNE software accomplishes morphing with the help of morphing
-maps which can be either computed on demand or precomputed using mne_make_morph_maps,
-see :ref:`CHDBBHDH`. The morphing is performed with the help
-of the registered spherical surfaces (``lh.sphere.reg`` and ``rh.sphere.reg``)
-which must be produced in FreeSurfer.
-A morphing map is a linear mapping from cortical surface values
-in subject A (:math:`x^{(A)}`) to those in another
-subject B (:math:`x^{(B)}`)
-
-.. math::    x^{(B)} = M^{(AB)} x^{(A)}\ ,
-
-where :math:`M^{(AB)}` is a sparse matrix
-with at most three nonzero elements on each row. These elements
-are determined as follows. First, using the aligned spherical surfaces,
-for each vertex :math:`x_j^{(B)}`, find the triangle :math:`T_j^{(A)}` on the
-spherical surface of subject A which contains the location :math:`x_j^{(B)}`.
-Next, find the numbers of the vertices of this triangle and set
-the corresponding elements on the *j* th row of :math:`M^{(AB)}` so that :math:`x_j^{(B)}` will
-be a linear interpolation between the triangle vertex values reflecting
-the location :math:`x_j^{(B)}` within the triangle :math:`T_j^{(A)}`.
-
-It follows from the above definition that in general
-
-.. math::    M^{(AB)} \neq (M^{(BA)})^{-1}\ ,
-
-*i.e.*,
-
-.. math::    x^{(A)} \neq M^{(BA)} M^{(AB)} x^{(A)}\ ,
-
-even if
-
-.. math::    x^{(A)} \approx M^{(BA)} M^{(AB)} x^{(A)}\ ,
-
-*i.e.*, the mapping is *almost* a
-bijection.
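-
-To make the mapping concrete, the following minimal sketch (with
-hypothetical weights and sizes; real maps are stored in the files
-described below) applies such a sparse map to surface values::
-
-    import numpy as np
-    from scipy import sparse
-
-    # Toy morphing map M_AB: each row holds the barycentric weights of
-    # the (at most three) vertices of the enclosing triangle on subject A.
-    rows = [0, 0, 0, 1, 1, 1]
-    cols = [10, 11, 12, 11, 12, 13]
-    weights = [0.2, 0.5, 0.3, 0.1, 0.6, 0.3]  # each row sums to one
-    M_AB = sparse.csr_matrix((weights, (rows, cols)), shape=(2, 20))
-
-    x_A = np.random.randn(20)  # surface values on subject A
-    x_B = M_AB @ x_A           # interpolated values on subject B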
-
-.. _CHDEBAHH:
-
-About smoothing
-###############
-
-The current estimates are normally defined only in a decimated
-grid which is a sparse subset of the vertices in the triangular
-tessellation of the cortical surface. Therefore, any sparse set
-of values is distributed to neighboring vertices to make the visualized
-results easily understandable. This procedure has been traditionally
-called smoothing but a more appropriate name
-might be smudging or blurring in
-accordance with similar operations in image processing programs.
-
-In MNE software terms, smoothing of the vertex data is an
-iterative procedure, which produces a blurred image :math:`x^{(N)}` from
-the original sparse image :math:`x^{(0)}` by applying
-in each iteration step a sparse blurring matrix:
-
-.. math::    x^{(p)} = S^{(p)} x^{(p - 1)}\ .
-
-On each row :math:`j` of the matrix :math:`S^{(p)}` there
-are :math:`N_j^{(p - 1)}` nonzero entries whose values
-equal :math:`1/N_j^{(p - 1)}`. Here :math:`N_j^{(p - 1)}` is
-the number of immediate neighbors of vertex :math:`j` which
-had non-zero values at iteration step :math:`p - 1`.
-Matrix :math:`S^{(p)}` thus assigns the average
-of the non-zero neighbors as the new value for vertex :math:`j`.
-One important feature of this procedure is that it tends to preserve
-the amplitudes while blurring the surface image.
-
-Once the indices of the non-zero vertices in :math:`x^{(0)}` and
-the topology of the triangulation are fixed, the matrices :math:`S^{(p)}` are
-fixed and independent of the data. Therefore, it would in principle be
-possible to construct a composite blurring matrix
-
-.. math::    S^{(N)} = \prod_{p = 1}^N {S^{(p)}}\ .
-
-However, it turns out to be computationally more efficient
-to do the blurring iteratively. The above formula for :math:`S^{(N)}` also
-shows that the smudging (smoothing) operation is linear.
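-
-A minimal sketch of a single iteration, written directly from the
-definition above (the adjacency structure is hypothetical input), is::
-
-    import numpy as np
-
-    def smooth_step(x, neighbors):
-        """One smudging iteration: average of the non-zero neighbors.
-
-        x         : vertex values, zero where no value is defined
-        neighbors : list of immediate-neighbor index lists, one per vertex
-        """
-        out = np.zeros_like(x)
-        for j, nbrs in enumerate(neighbors):
-            active = [k for k in nbrs if x[k] != 0]  # the N_j^(p-1) neighbors
-            if active:
-                out[j] = np.mean([x[k] for k in active])
-        return out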
-
-.. _CHDBBHDH:
-
-Precomputing the morphing maps
-##############################
-
-The utility mne_make_morph_maps was
-created to assist mne_analyze and mne_make_movie in
-morphing. Since the morphing maps described above take a while to
-compute, it is beneficial to construct all necessary maps in advance
-before using mne_make_movie .
-The precomputed morphing maps are located in ``$SUBJECTS_DIR/morph-maps``. mne_make_morph_maps creates
-this directory automatically if it does not exist. If this directory
-exists when mne_analyze or mne_make_movie is run
-and morphing is requested, the software first looks for already
-existing morphing maps there. Also, if mne_analyze or mne_make_movie has
-to recompute any morphing maps, they will be saved to ``$SUBJECTS_DIR/morph-maps`` if
-this directory exists.
-
-The names of the files in ``$SUBJECTS_DIR/morph-maps`` are
-of the form:
-
- <*A*>-<*B*>-``morph.fif`` ,
-
-where <*A*> and <*B*> are
-names of subjects. These files contain the maps for both hemispheres,
-and in both directions, *i.e.*, both :math:`M^{(AB)}` and :math:`M^{(BA)}`, as
-defined above. Thus, the files <*A*>-<*B*>-``morph.fif`` and <*B*>-<*A*>-``morph.fif`` are
-functionally equivalent. The name of the file produced by mne_analyze or mne_make_movie depends
-on the role of <*A*> and <*B*> in
-the analysis.
-
-If you choose to compute the morphing maps in batch in advance,
-use mne_make_morph_maps , which
-accepts the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---redo**
-
-    Recompute the morphing maps even if they already exist.
-
-**\---from <*subject*>**
-
-    Compute morphing maps from this subject.
-
-**\---to <*subject*>**
-
-    Compute morphing maps to this subject.
-
-**\---all**
-
-    Do all combinations. If this is used without either ``--from`` or ``--to`` options,
-    morphing maps for all possible combinations are computed. If ``--from`` or ``--to`` is
-    present, only maps between the specified subject and all others
-    are computed.
-
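-For example, to precompute in one run all maps between one subject and
-all others under ``$SUBJECTS_DIR`` (the subject name is a placeholder)::
-
-    mne_make_morph_maps --from sample --all
-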
-.. note:: Because all morphing map files contain maps in both directions, the choice of ``--from`` and ``--to`` options only affects the naming of the morphing map files to be produced. mne_make_morph_maps creates the directory ``$SUBJECTS_DIR/morph-maps`` if necessary.
-
-.. _CHDCEAFC:
-
-Morphing label data
-###################
-
-In some instances it is desirable to use anatomically equivalent
-labels for all subjects in a study. This can be accomplished by
-creating a set of labels in one subject and morphing them to another
-subject's anatomy using the spherical morphing procedure. mne_morph_labels was
-created to facilitate this task. It has the following command-line
-options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---from <*subject*>**
-
-    Name of the subject for which the labels were originally defined.
-
-**\---to <*subject*>**
-
-    Name of the subject for which the morphed labels should be created.
-
-**\---labeldir <*directory*>**
-
-    A directory containing the labels to morph.
-
-**\---prefix <*prefix*>**
-
-    Adds <*prefix*> in the beginning
-    of the output label names. A dash will be inserted between <*prefix*> and
-    the rest of the name.
-
-**\---smooth <*number*>**
-
-    Apply smoothing with the indicated number of iteration steps (see :ref:`CHDEBAHH`) to the labels before morphing them. This is
-    advisable because otherwise the resulting labels may have small
-    holes in them, since the morphing map is not a bijection. By default,
-    two smoothsteps are taken.
-
-As the labels are morphed, a directory with the name of the
-subject specified with the ``--to`` option is created under
-the directory specified with ``--labeldir`` to hold the
-morphed labels.
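-
-For example, a hypothetical invocation (subject and directory names are
-placeholders) morphing all labels in ``./labels`` from subject ``sample``
-to subject ``fsaverage`` with the default smoothing::
-
-    mne_morph_labels --from sample --to fsaverage --labeldir ./labels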
-
-.. _CHDFDIFE:
-
-Averaging
-#########
-
-Overview
-========
-
-As illustrated in :ref:`CHDDJBDH`, cross-subject averaging
-involves three straightforward steps:
-
-- Use mne_make_movie to
-  create stc files morphed to a single subject. This requires the
-  use of the ``--morph`` option, see :ref:`CBBECEDE`.
-  The resulting files will have identical selections of vertices on
-  the cortical surface of the subject used in averaging. This step
-  can be sped up by precomputing the morphing maps employed in
-  the process, see :ref:`CHDBBHDH`.
-
-- Employ mne_average_estimates or
-  a Matlab script to read the data from the stc files and to produce
-  an output stc file containing the averaged data. The MNE Matlab
-  toolbox routines for reading and writing stc files are documented
-  in :ref:`ch_matlab`.
-
-- Use mne_analyze or mne_make_movie to
-  visualize the result or use the stc files from the previous step
-  in your own Matlab routines in further processing.
-
-.. _CHDDJBDH:
-
-.. figure:: pics/Averaging-flowchart.png
-    :alt: Workflow of the cross-subject averaging process in MNE
-
-    Workflow of the cross-subject averaging process
-    
-    References in parentheses indicate sections and chapters of this manual
-
-.. note:: The old utility mne_grand_average has been removed from the MNE software because of its inefficiency. All users should adopt the combination of mne_make_movie and mne_average_estimates instead.
-
-.. warning:: With the ``--ico`` option it is now possible to generate source spaces with an equal number of vertices in each subject. This may lead to the wrong conclusion that stc data could be averaged without doing the morphing step first. Even with identical numbers of vertices in the source spaces it is mandatory to process the data through mne_make_movie to create corresponding source locations before using mne_average_estimates.
-
-.. _CHDEHFGD:
-
-The averager
-============
-
-mne_average_estimates is
-the new utility for averaging data in stc files. It requires that
-all stc files represent data on one individual's cortical
-surface and contain identical sets of vertices. mne_average_estimates uses
-linear interpolation to resample data in time as necessary. The
-command line arguments are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---desc <*filename*>**
-
-    Specifies the description file for averaging. The format of this
-    file is described below.
-
-The description file
---------------------
-
-The description file for mne_average_estimates consists
-of a sequence of tokens, separated by whitespace (space, tab, or
-newline). If a token consists of several words it has to be enclosed
-in quotes. One or more tokens constitute a phrase, which has a
-meaning for the averaging definition. Any line starting with the
-pound sign (#) is considered to be a comment line. There are two
-kinds of phrases in the description file: global and contextual.
-The global phrases have the same meaning independent of their location
-in the file while the contextual phrases have different effects depending
-on their location in the file.
-
-There are three types of contexts in the description file:
-the global context, an input context,
-and the output context. In the
-beginning of the file the context is global for
-defining global parameters. The input context
-defines one of the input files (subjects) while the output context
-specifies the destination for the average.
-
-The global phrases are:
-
-**tmin <*value/ms*>**
-
-    The minimum time to be considered. The output stc file starts at
-    this time point if the time ranges of the stc files include this
-    time. Otherwise the output starts from the next later available
-    time point.
-
-**tstep <*step/ms*>**
-
-    Time step between consecutive movie frames, specified in milliseconds.
-
-**tmax <*value/ms*>**
-
-    The maximum time point to be considered. A multiple of tstep will be
-    added to the first time point selected until this value or the last time
-    point in one of the input stc files is reached.
-
-**integ <*Δt/ms*>**
-
-    Integration time for each frame. Defaults to zero. The integration will
-    be performed on sensor data. If the time specified for a frame is :math:`t_0`,
-    the integration range will be :math:`t_0 - \Delta t/2 \leq t \leq t_0 + \Delta t/2`.
-
-**stc <*filename*>**
-
-    Specifies an input stc file. The filename can be specified with
-    one of the ``-lh.stc`` and ``-rh.stc`` endings
-    or without them. This phrase ends the present context and starts
-    an input context.
-
-**deststc <*filename*>**
-
-    Specifies the output stc file. The filename can be specified with
-    one of the ``-lh.stc`` and ``-rh.stc`` endings
-    or without them. This phrase ends the present context and starts
-    the output context.
-
-**lh**
-
-    Process the left hemisphere. By default, both hemispheres are processed.
-
-**rh**
-
-    Process the right hemisphere. By default, both hemispheres are processed.
-
-The contextual phrases are:
-
-**weight <*value*>**
-
-    Specifies the weight of the current data set. This phrase is valid
-    in the input and output contexts.
-
-**abs**
-
-    Specifies that the absolute value of the data should be taken. Valid
-    in all contexts. If specified in the global context, applies to
-    all subsequent input and output contexts. If specified in the input
-    or output contexts, applies only to the data associated with that
-    context.
-
-**pow <*value*>**
-
-    Specifies that the data should be raised to the specified power. For
-    negative values, the absolute value of the data will be taken and
-    the negative sign will be transferred to the result, unless abs is
-    specified. Valid in all contexts. Rules of application are identical
-    to abs.
-
-**sqrt**
-
-    Shorthand for pow 0.5.
-
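-For example, a hypothetical description file (the stc file names are
-placeholders) averaging two subjects with equal weights could read::
-
-    #  Global parameters
-    tmin 0
-    tmax 300
-    tstep 5
-    #  Inputs
-    stc subj1-morphed
-    weight 1
-    stc subj2-morphed
-    weight 1
-    #  Output
-    deststc grand-average
-    weight 1
-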
-The effects of the options can be summarized as follows.
-Suppose that the description file includes :math:`P` input contexts
-and the temporally resampled data are organized in matrices :math:`S^{(p)}`,
-where :math:`p = 1 \dotso P` is the subject index, and
-the rows are the signals at different vertices of the cortical surface.
-The average computed by mne_average_estimates is
-then:
-
-.. math::    A_{jk} = w\,[\newcommand\sgn{\mathop{\mathrm{sgn}}\nolimits}\sgn(B_{jk})]^{\alpha}\,|B_{jk}|^{\beta}
-
-with
-
-.. math::    B_{jk} = \sum_{p = 1}^{P} {\bar{w_p}[\newcommand\sgn{\mathop{\mathrm{sgn}}\nolimits}\sgn(S_{jk}^{(p)})]^{\alpha_p}|S_{jk}^{(p)}|^{\beta_p}}
-
-and
-
-.. math::    \bar{w_p} = w_p / \sum_{p = 1}^{P} {|w_p|}\ .
-
-In the above, :math:`\beta_p` and :math:`w_p` are
-the powers and weights assigned to each of the subjects whereas :math:`\beta` and :math:`w` are
-the output power and weight, respectively. The sign is either
-included (:math:`\alpha_p = 1`, :math:`\alpha = 1`)
-or omitted (:math:`\alpha_p = 2`, :math:`\alpha = 2`)
-depending on the presence of abs phrases in the description file.
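-
-As a concrete illustration, here is a minimal sketch of this average
-(assuming the per-subject matrices have already been resampled to common
-time points, and using a single global abs setting for simplicity)::
-
-    import numpy as np
-
-    def average_estimates(S, w_subj, beta_subj, w_out=1.0, beta_out=1.0,
-                          use_abs=False):
-        """Sketch of the weighted average A defined above.
-
-        S         : list of (n_vertices, n_times) arrays, one per subject
-        w_subj    : subject weights w_p
-        beta_subj : subject powers beta_p
-        """
-        alpha = 2.0 if use_abs else 1.0  # abs phrases omit the sign
-        norm = np.sum(np.abs(w_subj))    # normalization of the weights
-        B = sum((w / norm) * np.sign(s) ** alpha * np.abs(s) ** b
-                for s, w, b in zip(S, w_subj, beta_subj))
-        return w_out * np.sign(B) ** alpha * np.abs(B) ** beta_out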
-
-.. note:: mne_average_estimates requires that the number of vertices in the stc files are the same and that the vertex numbers are identical. This will be the case if the files have been produced in mne_make_movie using the ``--morph`` option.
-
-.. note:: It is straightforward to read and write stc files using the MNE Matlab toolbox described in :ref:`ch_matlab` and thus write custom Matlab functions to realize more complicated custom group analysis tools.
diff --git a/doc/source/manual/pics/Averaging-flowchart.png b/doc/source/manual/pics/Averaging-flowchart.png
deleted file mode 100644
index 343ff02..0000000
Binary files a/doc/source/manual/pics/Averaging-flowchart.png and /dev/null differ
diff --git a/doc/source/manual/utilities.rst b/doc/source/manual/utilities.rst
deleted file mode 100644
index 69c1ad5..0000000
--- a/doc/source/manual/utilities.rst
+++ /dev/null
@@ -1,1402 +0,0 @@
-
-
-.. _ch_misc:
-
-=======================
-Miscellaneous utilities
-=======================
-
-Overview
-########
-
-This chapter describes various utility programs included
-with the MNE software. Each utility's documentation consists of a
-brief description of its purpose followed by the specification of
-command-line options.
-
-.. _CHDFIGBG:
-
-Finding software versions
-#########################
-
-The utility mne_list_versions lists
-version numbers and compilation dates of all software modules that
-provide this information. This administration utility is located
-in ``$MNE_ROOT/bin/admin``. The output from mne_list_versions or
-the output of individual modules with the ``--version`` option
-is useful when bugs are reported to the developers of MNE software.
-
-.. _CHDHEDEF:
-
-Listing contents of a fif file
-##############################
-
-Using the utility mne_show_fiff it
-is possible to display information about the contents of a fif file
-to the standard output. The command line options for mne_show_fiff are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in  <*name*>**
-
-    Specifies the fif file whose contents will be listed.
-
-**\---verbose**
-
-    Produce a verbose output. The data of most tags is included in the output.
-    This excludes matrices and vectors. Only the first 80 characters
-    of strings are listed unless the ``--long`` option is present.
-
-**\---blocks**
-
-    Only list the blocks (the tree structure) of the file. The tags
-    within each block are not listed.
-
-**\---indent  <*number*>**
-
-    Number of spaces for indentation for each deeper level in the tree structure
-    of the fif files. The default indentation is 3 spaces in terse and
-    no spaces in verbose listing mode.
-
-**\---long**
-
-    List all data from string tags instead of the first 80 characters.
-    This option has no effect unless the ``--verbose`` option
-    is also present.
-
-**\---tag  <*number*>**
-
-    List only tags of this kind. Multiple ``--tag`` options
-    can be specified to list several different kinds of data.
-
-mne_show_fiff reads the
-explanations of tag kinds, block kinds, and units from ``$MNE_ROOT/share/mne/fiff_explanations.txt`` .
-
-Data file modification utilities
-################################
-
-This section contains utilities which can be used to add
-information or fix existing information in MEG/EEG data fif files.
-Unless otherwise noted these utilities can be applied to both raw
-and evoked data files.
-
-.. _CHDDHBEE:
-
-Designating bad channels: mne_mark_bad_channels
-===============================================
-
-This utility adds or replaces information about unusable
-(bad) channels. The command line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---bad  <*filename*>**
-
-    Specify a text file containing the names of the bad channels, one channel
-    name per line. The names of the channels in this file must match
-    those in the data file exactly. If this option is missing, the bad channel
-    information is cleared.
-
-**<*data file name*>**
-
-    The remaining arguments are taken as data file names to be modified.
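-
-For example, a hypothetical invocation (file names are placeholders)
-marking the channels listed in ``bad.txt`` in two raw data files::
-
-    mne_mark_bad_channels --bad bad.txt run1_raw.fif run2_raw.fif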
-
-.. _CHDBFDIC:
-
-Fixing the encoding of the trigger channel: mne_fix_stim14
-==========================================================
-
-Some earlier versions of the Neuromag acquisition software
-had a problem with the encoding of the eighth bit on the digital
-stimulus channel STI 014. This problem has now been fixed. Old data
-files can be fixed with mne_fix_stim14,
-which takes raw data file names as arguments. mne_fix_stim14 also
-changes the calibration of STI 014 to unity. If the encoding of
-STI 014 is already correct, running mne_fix_stim14 will
-not have any effect on the raw data.
-
-In newer Neuromag Vectorview systems with 16-bit digital
-inputs the upper two bytes of the samples may be incorrectly set
-when stimulus input 16 is used and the data are acquired in the
-32-bit mode. This problem can be fixed by running mne_fix_stim14 on
-a raw data file with the ``--32`` option:
-
-``mne_fix_stim14 --32``  <*raw data file*>
-
-In this case, the correction will be applied to the stimulus
-channels 'STI101' and 'STI201'.
-
-.. _CHDJGGGC:
-
-Updating EEG location info: mne_check_eeg_locations
-===================================================
-
-Some versions of the Neuromag acquisition software did not
-copy the EEG channel location information properly from the Polhemus
-digitizer information data block to the EEG channel information
-records if the number of EEG channels exceeds 60. The purpose of mne_check_eeg_locations is
-to detect this problem and fix it, if requested. The command-line
-options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---file  <*name*>**
-
-    Specify the measurement data file to be checked or modified.
-
-**\---dig  <*name*>**
-
-    Name of the file containing the Polhemus digitizer information. Default
-    is the data file name.
-
-**\---fix**
-
-    By default mne_check_eeg_locations only
-    checks for missing EEG locations (locations close to the origin).
-    With ``--fix``, mne_check_eeg_locations reads
-    the Polhemus data from the specified file and copies the EEG electrode
-    location information to the channel information records in the measurement
-    file. There is no harm in running mne_check_eeg_locations on
-    a data file even if the EEG channel locations were correct in the
-    first place.
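-
-For example, to check and fix a hypothetical measurement file in place::
-
-    mne_check_eeg_locations --file run1_raw.fif --fix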
-
-.. _CHDGAAJC:
-
-Updating magnetometer coil types: mne_fix_mag_coil_types
-========================================================
-
-The purpose of mne_fix_mag_coil_types is
-to change coil type 3022 to 3024 in the MEG channel definition records
-in the data files specified on the command line.
-
-As shown in Tables 5.2 and 5.3, the Neuromag Vectorview systems
-can contain magnetometers with two different coil sizes (coil types
-3022 and 3023 vs. 3024). The systems incorporating coils of type
-3024 were introduced last. At some sites the data files still
-define the magnetometers to be of type 3022 to ensure compatibility
-with older versions of Neuromag software. In the MNE software as
-well as in the present version of Neuromag software coil type 3024
-is fully supported. Therefore, it is now safe to upgrade the data
-files to use the true coil type.
-
-If the ``--magnes`` option is specified, the 4D
-Magnes magnetometer coil type (4001) is changed to 4D Magnes gradiometer
-coil type (4002). Use this option if and *only
-if* your Magnes data comes from a system with axial gradiometers
-instead of magnetometers. The fif converter included with the Magnes
-system does not assign the gradiometer coil type correctly.
-
-.. note:: The effect of the difference between the coil sizes of magnetometer types 3022 and 3024 on the current estimates computed by the MNE software is very small. Therefore the use of mne_fix_mag_coil_types is not mandatory.
-
-.. _CHDCFEAJ:
-
-Modifying channel names and types: mne_rename_channels
-======================================================
-
-Sometimes it is necessary to change the names or types of channels
-in MEG/EEG data files. Such situations include:
-
-- Designating an EEG channel as an EOG channel.
-  For example, the EOG channels are not recognized as such in the
-  fif files converted from CTF data files.
-
-- Changing the name of the digital trigger channel of interest
-  to STI 014 so that mne_browse_raw and mne_process_raw will
-  recognize the correct channel without the need to specify the ``--digtrig``
-  option or the MNE_TRIGGER_CH_NAME environment variable every time a
-  data file is loaded.
-
-The utility mne_rename_channels was
-designed to meet the above needs. It recognizes the following command-line
-options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fif  <*name*>**
-
-    Specifies the name of the data file to modify.
-
-**\---alias  <*name*>**
-
-    Specifies the text file which contains the modifications to be applied,
-    see below.
-
-**\---revert**
-
-    Reverse the roles of old and new channel names in the alias file.
-
-Each line in the alias file contains the old name and new
-name for a channel, separated by a colon. The old name is a name
-of one of the channels presently in the file and the new name is
-the name to be assigned to it. The old name must match an existing
-channel name in the file exactly. The new name may be followed by
-another colon and a number which is the channel type to be assigned
-to this channel. The channel type options are listed in :ref:`CHDFHGCA`.
-
-.. _CHDFHGCA:
-
-.. table:: Channel types.
-
-    ==============  ======================
-    Channel type    Corresponding number
-    ==============  ======================
-    MEG             1
-    MCG             201
-    EEG             2
-    EOG             202
-    EMG             302
-    ECG             402
-    MISC            502
-    STIM            3
-    ==============  ======================
-
-.. warning:: Do not attempt to designate MEG channels    to EEG channels or vice versa. This may result in strange errors    during source estimation.
-
-.. note:: You might consider renaming the EEG channels with descriptive labels related to the standard 10-20 system. This allows you to use standard EEG channel names when defining derivations, see :ref:`CHDHJABJ` and :ref:`CACFHAFH`, as well as in the channel selection files used in mne_browse_raw, see :ref:`CACCJEJD`.
-
-.. _CHDBDDDF:
-
-Modifying trigger channel data: mne_add_triggers
-================================================
-
-Purpose
--------
-
-The utility mne_add_triggers modifies
-the digital trigger channel (STI 014) in raw data files
-to include additional transitions. Since the raw data file is modified,
-it is possible to make irreversible changes. Use this utility with
-caution. It is recommended that you never run mne_add_triggers on
-an original raw data file.
-
-Command line options
---------------------
-
-mne_add_triggers accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---raw  <*name*>**
-
-    Specifies the raw data file to be modified.
-
-**\---trg  <*name*>**
-
-    Specifies the trigger line modification list. This text file should
-    contain two entries per line: the sample number and the trigger
-    number to be added into the file. The number of the first sample
-    in the file is zero. It is recommended that trigger numbers whose
-    binary equivalent has lower eight bits equal to zero are used to
-    avoid conflicts with the ordinary triggers occurring in the file.
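-
-    For example, a hypothetical modification list adding trigger values
-    256 and 512 (whose lower eight bits are zero) at two sample numbers::
-
-        12000 256
-        15600 512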
-
-**\---delete**
-
-    Delete the triggers defined by the trigger file instead of adding
-    them. This enables changing the file to its original state, provided
-    that the trigger file is preserved.
-
-.. note:: Since mne_browse_raw and mne_process_raw can employ an event file which effectively adds new trigger instants, mne_add_triggers is for the most part obsolete, but it has been retained in the MNE software suite for backward compatibility.
-
-.. _CHDIJHIC:
-
-Removing identifying information
-================================
-
-Depending on the settings during acquisition in the Elekta-Neuromag EEG/MEG
-systems, the data files may contain subject-identifying information
-in unencrypted form. The utility mne_anonymize was
-written to clear tags containing such information from a fif file.
-Specifically, this utility removes the following tags from the fif
-file:
-
-.. _CHDEHBCG:
-
-.. table:: Tags cleared by mne_anonymize .
-
-    ========================  ==============================================
-    Tag                       Description
-    ========================  ==============================================
-    FIFF_SUBJ_FIRST_NAME      First name of the subject
-    FIFF_SUBJ_MIDDLE_NAME     Middle name of the subject
-    FIFF_SUBJ_LAST_NAME       Last name of the subject
-    FIFF_SUBJ_BIRTH_DAY       Birthday of the subject (Julian day number)
-    FIFF_SUBJ_SEX             The sex of the subject
-    FIFF_SUBJ_HAND            Handedness of the subject
-    FIFF_SUBJ_WEIGHT          Weight of the subject in kg
-    FIFF_SUBJ_HEIGHT          Height of the subject in m
-    FIFF_SUBJ_COMMENT         Comment about the subject
-    ========================  ==============================================
-
-.. note:: mne_anonymize normally keeps the FIFF_SUBJ_HIS_ID tag which can be used to identify the subjects uniquely after the information listed in :ref:`CHDEHBCG` has been removed. If the ``--his`` option is specified on the command line, the FIFF_SUBJ_HIS_ID tag will be removed as well. The data of the tags listed in :ref:`CHDEHBCG` and the optional FIFF_SUBJ_HIS_ID tag are overwritten with zeros and the space claimed by omitting these tags is added to the free sp [...]
-
-mne_anonymize recognizes
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---his**
-
-    Remove the FIFF_SUBJ_HIS_ID tag as well, see above.
-
-**\---file  <*name*>**
-
-    Specifies the name of the file to be modified.
-
-.. note:: You need write permission to the file to be processed.
-
-.. _CJACECAH:
-
-Copying the processing history
-==============================
-
-In order for the inverse operator calculation to work correctly
-with data processed with the Elekta-Neuromag Maxfilter (TM) software,
-the so-called *processing history* block must
-be included in data files. Previous versions of the MNE Matlab functions
-did not copy the processing history to saved files. As of March 30,
-2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have
-been enhanced to include these data in the output file as appropriate.
-If you have older raw data files created in Matlab from input which
-has been processed with Maxfilter, it is necessary to copy the *processing
-history* block from the original to the modified raw data
-file using the mne_copy_processing_history utility described
-below. The raw data processing programs mne_browse_raw and mne_process_raw have
-handled copying of the processing history since revision 2.5 of
-the MNE software.
-
-mne_copy_processing_history is
-simple to use:
-
-``mne_copy_processing_history --from`` <*from*> ``--to`` <*to*>,
-
-where  <*from*> is an
-original raw data file containing the processing history and  <*to*> is
-a file output with older MNE Matlab routines. Be careful: this operation
-cannot be undone. If the  <*from*> file
-does not have the processing history block or the  <*to*> file
-already has it, the destination file remains unchanged.
-
-.. _CHDHJABJ:
-
-Creating a derivation file
-##########################
-
-Purpose
-=======
-
-In mne_browse_raw , channel
-derivations are defined as linear combinations of real channels
-existing in the data files. The utility mne_make_derivations reads
-derivation data from a suitably formatted text file and produces
-a fif file containing the weights of derived channels as a sparse
-matrix. Two input file formats are accepted:
-
-- A file containing arithmetic expressions
-  defining the derivations and
-
-- A file containing a matrix which specifies the weights of
-  the channels in each derivation.
-
-Both of these formats are described below.
-
-Command-line options
-====================
-
-mne_make_derivations recognizes
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in  <*name*>**
-
-    Specifies an input text file containing the derivations as arithmetic
-    expressions, see below.
-
-**\---inmat  <*name*>**
-
-    Specifies an input text file containing the derivation weight matrix,
-    see below.
-
-**\---trans**
-
-    Indicates that the file specified with the ``--inmat`` option
-    contains a transpose of the derivation matrix.
-
-**\---thresh  <*value*>**
-
-    Specifies the threshold between values to be considered zero and non-zero
-    in the input file specified with the ``--inmat`` option.
-    The default threshold is :math:`10^{-6}`.
-
-**\---out  <*name*>**
-
-    Specifies output fif file to contain the derivation data. The recommended
-    name of the derivation file has the format  <:math:`name`> ``-deriv.fif`` .
-
-**\---list  <*name*>**
-
-    List the contents of a derivation file to standard output. If this
-    option is missing and ``--out`` is specified, the content
-    of the output file will be listed once it is complete. If neither ``--list`` nor ``--out`` is present,
-    and ``--in`` or ``--inmat`` is specified, the
-    interpreted contents of the input file are listed.
-
-Derivation file formats
-=======================
-
-All lines in the input files starting with the pound sign
-(#) are considered to be comments. The format of a derivation in
-an arithmetic input file is:
-
-.. math::    \langle name \rangle = [\langle w_1 \rangle *] \langle name_1 \rangle + [\langle w_2 \rangle *] \langle name_2 \rangle \dotso
-
-where <:math:`name`> is the
-name of the derived channel, :math:`name_k` are
-the names of the channels comprising the derivation, and :math:`w_k` are
-their weights. Note that spaces are necessary between the items.
-Channel names containing spaces must be put in quotes. For example,
-
-``EEG-diff = "EEG 003" - "EEG 002"``
-
-defines a channel ``EEG-diff`` which is a difference
-between ``EEG 003`` and ``EEG 002`` . Similarly,
-
-``EEG-der = 3 * "EEG 010" - 2 * "EEG 002"``
-
-defines a channel which is three times ``EEG 010`` minus
-two times ``EEG 002`` .
-
-The format of a matrix derivation file is:
-
-.. math::    \langle nrow \rangle \langle ncol \rangle \langle names\ of\ the\ input\ channels \rangle \langle name_1 \rangle \langle weights \rangle \dotso
-
-The combination of the two arithmetic examples above can
-thus be represented as::
-
-    2 3
-    "EEG 002" "EEG 003" "EEG 010"
-    EEG-diff -1 1 0
-    EEG-der  -2 0 3
-
-Before a derivation is accepted for use by mne_browse_raw,
-the following criteria have to be met:
-
-- All channels to be combined into a single
-  derivation must have identical units of measure.
-
-- All channels in a single derivation have to be of the same
-  kind, *e.g.*, MEG channels or EEG channels.
-
-- All channels specified in a derivation have to be present
-  in the currently loaded data set.
-
-The validity check is done when a derivation file is loaded
-into mne_browse_raw , see :ref:`CACFHAFH`.
-
-.. note:: You might consider renaming the EEG channels with descriptive labels related to the standard 10-20 system using the mne_rename_channels utility, see :ref:`CHDCFEAJ`. This allows you to use standard EEG channel names in the derivations you define as well as in the channel selection files used in mne_browse_raw, see :ref:`CACCJEJD`.
-
-.. _CHDDGDJA:
-
-Creating a custom EEG layout
-############################
-
-Purpose
-=======
-
-Both MNE software (mne_analyze and mne_browse_raw)
-and Neuromag software (xplotter and xfit)
-employ text layout files to create topographical displays of MEG
-and EEG data. While the MEG channel layout is fixed, the EEG layout
-varies from experiment to experiment, depending on the number of
-electrodes used and the electrode cap configuration. The utility mne_make_eeg_layout was
-created to produce custom EEG layout files based on the EEG electrode
-location information included in the channel description records.
-
-mne_make_eeg_layout uses
-azimuthal equidistant projection to map the EEG channel locations
-onto a plane. The mapping consists of the following steps:
-
-- A sphere is fitted to the electrode
-  locations and the locations are translated by the location of the
-  origin of the best-fitting sphere.
-
-- The spherical coordinates (:math:`r_k`, :math:`\theta_k`, and :math:`\phi_k`)
-  corresponding to each translated electrode location are computed.
-
-- The projected locations :math:`u_k = R \theta_k \cos{\phi_k}` and :math:`v_k = R \theta_k \sin{\phi_k}` are
-  computed. By default, :math:`R = 20/{^{\pi}/_2}`, *i.e.* at
-  the equator (:math:`\theta = ^{\pi}/_2`) the multiplier is
-  20. This projection radius can be adjusted with the ``--prad`` option.
-  Increasing or decreasing :math:`R` makes
-  the spacing between the channel viewports larger or smaller, respectively.
-
-- A viewport with width 5 and height 4 is placed centered at
-  the projected location. The width and height of the viewport can
-  be adjusted with the ``--width`` and ``--height`` options.
-
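-The following minimal sketch (hypothetical variable names; the sphere
-fit and the translation by its origin are assumed to have been done
-already) illustrates the projection::
-
-    import numpy as np
-
-    def project_locations(pos, R=20.0 / (np.pi / 2.0)):
-        """Azimuthal equidistant projection of electrode locations.
-
-        pos : (n, 3) array of locations, already translated by the
-              origin of the best-fitting sphere.
-        """
-        r = np.linalg.norm(pos, axis=1)
-        theta = np.arccos(pos[:, 2] / r)        # polar angle
-        phi = np.arctan2(pos[:, 1], pos[:, 0])  # azimuth
-        u = R * theta * np.cos(phi)
-        v = R * theta * np.sin(phi)
-        return u, v
-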
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---lout  <*name*>**
-
-    Specifies the name of the layout file to be output.
-
-**\---nofit**
-
-    Do not fit a sphere to the electrode locations but use a standard sphere
-    center (:math:`x = y = 0`, and :math:`z = 40` mm) instead.
-
-**\---prad  <*value*>**
-
-    Specifies a non-standard projection radius :math:`R`,
-    see above.
-
-**\---width  <*value*>**
-
-    Specifies the width of the viewports. Default value = 5.
-
-**\---height  <*value*>**
-
-    Specifies the height of the viewports. Default value = 4.
-
-.. _BEHCBCGG:
-
-Adding neighborhood/topology information to source spaces
-#########################################################
-
-Purpose
-=======
-
-The utility mne_add_patch_info uses
-the detailed cortical surface geometry information to add data about
-cortical patches corresponding to each source space point. A new
-copy of the source space(s) included in the input file is created
-with the patch information included. In addition to the patch information, mne_add_patch_info can
-optionally calculate distances, along the cortical surface, between
-the vertices selected for the source space.
-
-.. note:: Depending on the speed of your computer and the options selected, mne_add_patch_info takes 5 - 30 minutes to run.
-
-.. _CJAGCDCC:
-
-Command line options
-====================
-
-mne_add_patch_info accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---verbose**
-
-    Provide verbose output during the calculations.
-
-**\---dist  <*dist/mm*>**
-
-    Invokes the calculation of distances between vertices included in
-    the source space along the cortical surface. Only pairs whose distance in
-    the three-dimensional volume is less than the specified distance are
-    considered. For details, see :ref:`CJAIFJDD`, below.
-
-**\---src  <*name*>**
-
-    The input source space file. The source space files usually end
-    with ``-src.fif`` .
-
-**\---srcp  <*name*>**
-
-    The output source space file which will contain the patch information.
-    If the file exists, it will be overwritten without asking for permission.
-    A recommended naming convention is to add the letter ``p`` after the
-    source spacing included in the file name. For example, if the input
-    file is ``mh-7-src.fif`` , a recommended output file name
-    is ``mh-7p-src.fif`` .
-
-**\---w  <*name*>**
-
-    Name of a w file, which will contain the patch area information. Two
-    files will be created:  <*name*> ``-lh.w`` and  <*name*> ``-rh.w`` .
-    The numbers in the files are patch areas in :math:`\text{mm}^2`.
-    The source space vertices are marked with value 150.
-
-**\---labeldir  <*directory*>**
-
-    Create a label file corresponding to each of the patches in the
-    given directory. The directory must be created before running mne_add_patch_info .
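-
-For example, following the file naming convention suggested above (the
-``--dist`` value is only an example)::
-
-    mne_add_patch_info --src mh-7-src.fif --srcp mh-7p-src.fif --dist 7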
-
-.. _CJAIFJDD:
-
-Computational details
-=====================
-
-By default, mne_add_patch_info creates
-a copy of the source space(s) with the following additional information
-for each vertex in the original dense triangulation of the cortex:
-
-- The number of the closest active source
-  space vertex and
-
-- The distance to this vertex.
-
-This information can be used to determine, *e.g.*,
-the sizes of the patches, their average normals, and the standard
-deviation of the normal directions. This information is also returned
-by the mne_read_source_space Matlab function as described in Table 10.28.
-
-The ``--dist`` option to mne_add_patch_info invokes
-the calculation of inter-vertex distances. These distances are computed
-along the cortical surface (usually the white matter) on which
-the source space vertices are located.
-
-Since the calculation of all possible distances would take
-a very long time, the distance given with the ``--dist`` option allows
-restriction to the neighborhood of each source space vertex. This
-neighborhood is defined as the sphere around each source space vertex,
-with radius given by the ``--dist`` option. Because the distance calculation
-is done along the folded cortical surface whose details are given
-by the dense triangulation of the cortical surface produced by FreeSurfer,
-some of the distances computed will be larger than the value given
-with ``--dist`` .
-
-Converting covariance data into an SSP operator
-###############################################
-
-Purpose
-=======
-
-The utility mne_cov2proj picks
-eigenvectors from a covariance matrix and outputs them as a signal-space
-projection (SSP) file.
-
-Command line options
-====================
-
-mne_cov2proj accepts the
-following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---cov  <*name*>**
-
-    The covariance matrix file to be used as a source. The covariance matrix
-    files usually end with ``-cov.fif`` .
-
-**\---proj  <*name*>**
-
-    The output file to contain the projection. It is recommended that
-    the file name ends with ``-proj.fif`` .
-
-**\---bad  <*name*>**
-
-    Specify channels not to be included when an eigenvalue decomposition
-    of the covariance matrix is computed.
-
-**\---include  <*val1*> [: <*val2*> ]**
-
-    Select an eigenvector or a range of eigenvectors to include. It
-    is recommended that magnetometers, gradiometers, and EEG data are handled
-    separately with the help of the ``--bad`` , ``--meg`` , ``--megmag`` , ``--meggrad`` ,
-    and ``--eeg`` options.
-
-**\---meg**
-
-    After loading the covariance matrix, modify it so that only elements corresponding
-    to MEG channels are included.
-
-**\---eeg**
-
-    After loading the covariance matrix, modify it so that only elements corresponding
-    to EEG channels are included.
-
-**\---megmag**
-
-    After loading the covariance matrix, modify it so that only elements corresponding
-    to MEG magnetometer channels are included.
-
-**\---meggrad**
-
-    After loading the covariance matrix, modify it so that only elements corresponding
-    to MEG planar gradiometer channels are included.
-
-.. note:: The ``--megmag`` and ``--meggrad`` options employ the Vectorview channel numbering scheme to recognize MEG magnetometers (channel names ending with '1') and planar gradiometers (other channels). Therefore, these options are only meaningful in conjunction with data acquired with a Neuromag Vectorview system.
-
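-For example, the two largest MEG eigenvectors of a covariance matrix
-could be extracted into an SSP file with (illustrative file names)::
-
-  mne_cov2proj --cov ecg-cov.fif --proj ecg-proj.fif --meg --include 1:2
-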
-.. _CHDECHBF:
-
-Fitting a sphere to a surface
-#############################
-
-Purpose
-=======
-
-The utility mne_fit_sphere_to_surf finds
-the sphere which best fits a given surface.
-
-Command line options
-====================
-
-mne_fit_sphere_to_surf accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---bem  <*name*>**
-
-    A BEM file to use. The names of these files usually end with ``bem.fif`` or ``bem-sol.fif`` .
-
-**\---surf  <*name*>**
-
-    A FreeSurfer surface file to read. This is an alternative to using
-    a surface from the BEM file.
-
-**\---scalp**
-
-    Use the scalp surface instead of the inner skull surface in sphere
-    fitting. If the surface is specified with the ``--surf`` option,
-    this option has no effect.
-
-**\---mritrans  <*name*>**
-
-    A file containing a transformation matrix between the MEG head coordinates
-    and MRI coordinates. With this option, the sphere origin will be
-    output in MEG head coordinates. Otherwise the output will be in MRI
-    coordinates.
-
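-For example, a sphere could be fitted to the inner skull surface of a
-BEM, with the origin reported in MEG head coordinates (illustrative
-file names)::
-
-  mne_fit_sphere_to_surf --bem sample-bem.fif --mritrans sample-trans.fif
-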
-.. _CHDDCBGI:
-
-Computing sensitivity maps
-##########################
-
-Purpose
-=======
-
-mne_sensitivity_map computes
-the size of the columns of the forward operator and outputs the
-result in w files.
-
-Command line options
-====================
-
-mne_sensitivity_map accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fwd  <*name*>**
-
-    Specifies a forward solution file to analyze. By default the MEG
-    forward solution is considered.
-
-**\---proj  <*name*>**
-
-    Specifies a file containing an SSP operator to be applied. If necessary,
-    multiple ``--proj`` options can be specified. For map types 1 - 4 (see
-    below), SSP is applied to the forward model data. For map types
-    5 and 6, the effects of SSP are evaluated against the unmodified
-    forward model.
-
-**\---eeg**
-
-    Use the EEG forward solution instead of the MEG one. It does not make
-    sense to consider a combination because of the different units of
-    measure. For the same reason, gradiometers and magnetometers have
-    to be handled separately, see ``--mag`` option below. By
-    default MEG gradiometers are included.
-
-**\---mag**
-
-    Include MEG magnetometers instead of gradiometers.
-
-**\---w  <*name*>**
-
-    Specifies the stem of the output w files. To obtain the final output file
-    names, ``-lh.w`` and ``-rh.w`` are appended for
-    the left and right hemisphere, respectively.
-
-**\---smooth  <*number*>**
-
-    Specifies the number of smooth steps to apply to the resulting w files.
-    Default: no smoothing.
-
-**\---map  <*number*>**
-
-    Select the type of sensitivity map to compute. At present, valid numbers
-    are 1 - 6. For details, see :ref:`CHDCDJIJ`, below.
-
-.. _CHDCDJIJ:
-
-Available sensitivity maps
-==========================
-
-In the following, let
-
-.. math::    G_k = [g_{xk} g_{yk} g_{zk}]
-
-denote the three consecutive columns of the gain matrix :math:`G` corresponding to
-the fields of three orthogonal dipoles at source space location :math:`k`.
-Further, let us assume that the source coordinate system has been
-selected so that the :math:`z` -axis points
-in the cortical normal direction and the :math:`xy` plane
-is thus the tangent plane of the cortex at the source space location :math:`k`.
-Next, compute the SVD
-
-.. math::    G_k = U_k \Lambda_k V_k^T
-
-and let :math:`g_{1k} = u_{1k} \lambda_{1k}`, where :math:`\lambda_{1k}` and :math:`u_{1k}` are
-the largest singular value and the corresponding left singular vector
-of :math:`G_k`, respectively. It is easy to see
-that :math:`g_{1k}` has the largest power
-among the signal distributions produced by unit dipoles at source
-space location :math:`k`.
-
-Furthermore, assume that the columns of the orthogonal matrix :math:`U_P` (:math:`U_P^T U_P = I`) contain
-the orthogonal basis of the noise subspace corresponding to the signal
-space projection (SSP) operator :math:`P` specified
-with one or more ``--proj`` options so that :math:`P = I - U_P U_P^T`.
-For more information on SSP, see :ref:`CACCHABI`.
-
-With these definitions, the map selections defined with the ``--map`` option
-correspond to the following:
-
-**\---map 1**
-
-    Compute :math:`\sqrt{g_{1k}^T g_{1k}} = \lambda_{1k}` at each source space point.
-    Normalize the result so that the maximum value equals one.
-
-**\---map 2**
-
-    Compute :math:`\sqrt{g_z^T g_z}` at each source space point.
-    Normalize the result so that the maximum value equals one. This
-    is the amplitude of the signals produced by unit dipoles normal
-    to the cortical surface.
-
-**\---map 3**
-
-    Compute :math:`\sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
-
-**\---map 4**
-
-    Compute :math:`1 - \sqrt{g_z^T g_z / g_{1k}^T g_{1k}}` at each source space point.
-    This could be called the *radiality index*.
-
-**\---map 5**
-
-    Compute the subspace correlation between :math:`g_z` and :math:`U_P`: :math:`\text{subcorr}^2(g_z , U_P) = (g_z^T U_P U_P^T g_z)/(g_z^T g_z)`.
-    This index equals zero if :math:`g_z` is
-    orthogonal to :math:`U_P` and one if :math:`g_z` lies
-    in the subspace defined by :math:`U_P`. This
-    map shows how close the field pattern of a dipole oriented perpendicular
-    to the cortex at each cortical location is to the subspace removed
-    by the SSP.
-
-**\---map 6**
-
-    Compute :math:`\sqrt{g_z^T P g_z / g_z^T g_z}`, which is the fraction
-    of the field pattern of a dipole oriented perpendicular to the cortex
-    at each cortical location that remains after the SSP is applied.
-
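-For example, a smoothed radiality-index map (``--map 4`` ) could be
-computed with (illustrative file names)::
-
-  mne_sensitivity_map --fwd sample-fwd.fif --map 4 --w sample_radiality --smooth 5
-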
-.. _CHDDDJCA:
-
-Transforming locations
-######################
-
-Purpose
-=======
-
-mne_transform_points applies
-the coordinate transformation relating the MEG head coordinates
-and the MRI coordinates to a set of locations listed in a text file.
-
-Command line options
-====================
-
-mne_transform_points accepts
-the following command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in  <*name*>**
-
-    Specifies the input file. The file must contain three numbers on
-    each line, which are the *x*, *y*,
-    and *z* coordinates of a point in space. By default,
-    the input is in millimeters.
-
-**\---iso  <*name*>**
-
-    Specifies the name of a fif file containing Isotrak data. If this
-    option is present, this file will be used as the input instead of the
-    text file specified with the ``--in`` option.
-
-**\---trans  <*name*>**
-
-    Specifies the name of a fif file containing the coordinate transformation
-    between the MEG head coordinates and MRI coordinates. If this file
-    is not present, the transformation will be replaced by a unit transform.
-
-**\---out  <*name*>**
-
-    Specifies the output file. This file has the same format as the
-    input file.
-
-**\---hpts**
-
-    Output the data in the head points (hpts)
-    format accepted by tkmedit . In
-    this format, the coordinates are preceded by a point category (hpi,
-    cardinal or fiducial, eeg, extra) and a sequence number, see :ref:`CJADJEBH`.
-
-**\---meters**
-
-    The coordinates are listed in meters rather than millimeters.
-
-**\---tomri**
-
-    By default, the coordinates are transformed from MRI coordinates to
-    MEG head coordinates. This option reverses the transformation to
-    be from MEG head coordinates to MRI coordinates.
-
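-For example, the Isotrak points of a raw file could be transformed to
-MRI coordinates and output in the hpts format with (illustrative file
-names)::
-
-  mne_transform_points --iso sample_raw.fif --trans sample-trans.fif --tomri --hpts --out sample.hpts
-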
-.. _CHDDIDCC:
-
-Inquiring and changing baselines
-################################
-
-The utility mne_change_baselines computes
-baseline values and applies them to an evoked-response data file.
-The command-line options are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---in  <*name*>**
-
-    Specifies the input data file.
-
-**\---set  <*number*>**
-
-    The data set number to compute baselines from or to apply baselines
-    to. If this option is omitted, all average data sets in the input file
-    are processed.
-
-**\---out  <*name*>**
-
-    The output file.
-
-**\---baselines  <*name*>**
-
-    Specifies a text file which contains the baseline values to be applied. Each
-    line should contain a channel name, a colon, and the baseline value
-    given in 'native' units (T/m, T, or V). If this
-    option is encountered, the limits specified by previous ``--bmin`` and ``--bmax`` options will not
-    have an effect.
-
-**\---list  <*name*>**
-
-    Specifies a text file to contain the baseline values. Listing is
-    provided only if a specific data set is selected with the ``--set`` option.
-
-**\---bmin  <*value/ms*>**
-
-    Lower limit of the baseline. Effective only if ``--baselines`` option is
-    not present. Both ``--bmin`` and ``--bmax`` must
-    be present to compute the baseline values. If either ``--bmin`` or ``--bmax`` is
-    encountered, any previous ``--baselines`` option will be ignored.
-
-**\---bmax  <*value/ms*>**
-
-    Upper limit of the baseline.
-
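-For example, a -200...0 ms baseline could be applied to the first data
-set of an evoked file with (illustrative file names)::
-
-  mne_change_baselines --in sample-ave.fif --set 1 --bmin -200 --bmax 0 --out sample-bl-ave.fif
-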
-.. _CHDECAFD:
-
-Data simulator
-##############
-
-Purpose
-=======
-
-The utility mne_simu creates
-simulated evoked response data for investigation of the properties
-of the inverse solutions. It computes MEG signals generated by dipoles
-normal to the cortical mantle at one or several ROIs defined with
-label files. Colored noise can be added to the signals.
-
-Command-line options
-====================
-
-mne_simu has the following
-command-line options:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---fwd  <*name*>**
-
-    Specify a forward solution file to employ in the simulation.
-
-**\---meg**
-
-    Provide MEG data in the output file.
-
-**\---eeg**
-
-    Provide EEG data in the output file.
-
-**\---out  <*name*>**
-
-    Specify the output file. By default, this will be an evoked data
-    file in the fif format.
-
-**\---raw**
-
-    Output the data as a raw data fif file instead of an evoked one.
-
-**\---mat**
-
-    Produce Matlab output of the simulated fields instead of the fif evoked
-    file.
-
-**\---label  <*name*>**
-
-    Define an ROI. Several label files can be present. By default, the sources
-    in the labels will have :math:`\cos^2` -shaped non-overlapping
-    timecourses, see below.
-
-**\---timecourse  <*name*>**
-
-    Specifies a text file which contains an expression for a source
-    time course, see :ref:`CHDCFIBH`. If no ``--timecourse`` options
-    are present, the standard source time courses described in :ref:`CHDFIIII` are used. Otherwise, the time course expressions
-    are read from the files specified. The time course expressions are
-    associated with the labels in the order they are specified. If the
-    number of expressions is smaller than the number of labels, the
-    last expression specified will be reused for the remaining labels.
-
-**\---sfreq  <*freq/Hz*>**
-
-    Specifies the sampling frequency of the output data (default = 1000 Hz). This
-    option is used only with the time course files.
-
-**\---tmin  <*time/ms*>**
-
-    Specifies the starting time of the data, used only with time course files
-    (default -200 ms).
-
-**\---tmax  <*time/ms*>**
-
-    Specifies the ending time of the data, used only with time course files
-    (default 500 ms).
-
-**\---seed  <*number*>**
-
-    Specifies the seed for random numbers. This seed is used both for adding
-    noise, see :ref:`CHDFBJIJ` and for random numbers in source waveform
-    expressions, see :ref:`CHDCFIBH`. If no seed is specified, the
-    current time in seconds since Epoch (January 1, 1970) is used.
-
-**\---all**
-
-    Activate all sources on the cortical surface uniformly. This overrides the ``--label`` options.
-
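-For example, a simulation with noise added to the signals of one ROI
-could be run with (illustrative file names)::
-
-  mne_simu --fwd sample-fwd.fif --label Aud-lh.label --senscov sample-cov.fif --nave 50 --meg --out simulated-ave.fif
-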
-.. _CHDFBJIJ:
-
-Noise simulation
-================
-
-Noise is added to the signals if the ``--senscov`` and ``--nave`` options
-are present. If ``--nave`` is omitted the number of averages
-is set to :math:`L = 100`. The noise is computed
-by first generating vectors of Gaussian random numbers :math:`n(t)` with :math:`n_j(t) \sim N(0,1)`.
-Thereafter, the noise-covariance matrix :math:`C` is
-used to color the noise:
-
-.. math::    n_c(t) = \frac{1}{\sqrt{L}} U \Lambda\, n(t)\ ,
-
-where we have used the eigenvalue decomposition of the positive-definite
-covariance matrix:
-
-.. math::    C = U \Lambda^2 U^T\ .
-
-Note that it is assumed that the noise-covariance matrix
-is given for raw data, *i.e.*, for :math:`L = 1`.
-
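-The coloring step is easy to verify numerically. The following is a
-minimal NumPy sketch (not part of MNE; the covariance matrix here is
-a randomly generated stand-in)::
-
-  import numpy as np
-
-  rng = np.random.RandomState(0)
-  n_channels, n_times, L = 10, 1000, 100
-  A = rng.randn(n_channels, n_channels)
-  C = np.dot(A, A.T) + n_channels * np.eye(n_channels)  # stand-in covariance
-  lam2, U = np.linalg.eigh(C)            # C = U diag(lam2) U^T
-  n = rng.randn(n_channels, n_times)     # white noise, N(0, 1)
-  n_c = np.dot(U * np.sqrt(lam2), n) / np.sqrt(L)
-  # the sample covariance of sqrt(L) * n_c approaches C as n_times grows
-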
-.. _CHDFIIII:
-
-Simulated data
-==============
-
-The default source waveform :math:`q_k` for
-the :math:`k^{th}` label is nonzero at times :math:`t_{kp} = (100(k - 1) + p)/f_s`, :math:`p = 0 \dotso 100` with:
-
-.. math::    q_k(t_{kp}) = Q_k \cos^2{(\frac{\pi p}{100} - \frac{\pi}{2})}\ ,
-
-i.e., the source waveforms are non-overlapping, 100-sample-wide
-:math:`\cos^2` pulses. The sampling frequency is :math:`f_s = 600` Hz.
-The source amplitude :math:`Q_k` is determined
-so that the strength of each of the dipoles in a label will be :math:`50 \text{nAm}/N_k`.
-
-Let us denote the sums of the magnetic fields and electric
-potentials produced by the dipoles normal to the cortical mantle
-at label :math:`k` by :math:`x_k`. The simulated
-signals are then:
-
-.. math::    x(t_j) = \sum_{k = 1}^{N_s} {q_k(t_j) x_k} + n_c(t_j)\ ,
-
-where :math:`N_s` is the number of
-sources.
-
-.. _CHDCFIBH:
-
-Source waveform expressions
-===========================
-
-The ``--timecourse`` option provides a flexible way to define
-the source waveforms in a functional form. The source
-waveform expression files consist of lines of the form:
-
- <*variable*> ``=``  <*arithmetic expression*>
-
-Each file may contain multiple lines. At the end of the evaluation,
-only the values in the variable ``y`` (``q`` )
-are significant, see :ref:`CHDJBIEE`. They assume the role
-of :math:`q_k(t_j)` to compute the simulated signals
-as described in :ref:`CHDFIIII`, above.
-
-All expressions are case insensitive. The variables are vectors
-with the length equal to the number of samples in the responses,
-determined by the ``--tmin`` , ``--tmax`` , and ``--sfreq`` options.
-The available variables are listed in :ref:`CHDJBIEE`.
-
-.. _CHDJBIEE:
-
-.. table:: Available variable names in source waveform expressions.
-
-    ================  =======================================
-    Variable          Meaning
-    ================  =======================================
-    x                 time [s]
-    t                 current value of x in [ms]
-    y                 the source amplitude [Am]
-    q                 synonym for y
-    a , b , c , d     help variables, initialized to zeros
-    ================  =======================================
-
-The arithmetic expressions can use the usual arithmetic operations
-as well as the mathematical functions listed in :ref:`CHDJIBHA`.
-The arguments can be vectors or scalar numbers. In addition, standard
-relational operators ( <, >, ==, <=, >=) and their textual
-equivalents (lt, gt, eq, le, ge) are available. Table :ref:`CHDDJEHH` gives some
-useful examples of source waveform expressions.
-
-.. tabularcolumns:: |p{0.2\linewidth}|p{0.6\linewidth}|
-.. _CHDJIBHA:
-.. table:: Mathematical functions available for source waveform expressions
-
-    +-----------------------+---------------------------------------------------------------+
-    | Function              | Description                                                   |
-    +-----------------------+---------------------------------------------------------------+
-    | abs(x)                | absolute value                                                |
-    +-----------------------+---------------------------------------------------------------+
-    | acos(x)               | :math:`\cos^{-1}x`                                            |
-    +-----------------------+---------------------------------------------------------------+
-    | asin(x)               | :math:`\sin^{-1}x`                                            |
-    +-----------------------+---------------------------------------------------------------+
-    | atan(x)               | :math:`\tan^{-1}x`                                            |
-    +-----------------------+---------------------------------------------------------------+
-    | atan2(x,y)            | :math:`\tan^{-1}(^y/_x)`                                      |
-    +-----------------------+---------------------------------------------------------------+
-    | ceil(x)               | Smallest integer value not smaller than :math:`x`             |
-    +-----------------------+---------------------------------------------------------------+
-    | cos(x)                | :math:`\cos x`                                                |
-    +-----------------------+---------------------------------------------------------------+
-    | cosw(x,a,b,c)         | :math:`\cos^2` -shaped window centered at :math:`b` with a    |
-    |                       | rising slope of length :math:`a` and a trailing slope of      |
-    |                       | length :math:`c`.                                             |
-    +-----------------------+---------------------------------------------------------------+
-    | deg(x)                | The value of :math:`x` converted from radians to degrees      |
-    +-----------------------+---------------------------------------------------------------+
-    | erf(x)                | :math:`\frac{2}{\sqrt{\pi}} \int_0^x{\text{exp}(-t^2)dt}`     |
-    +-----------------------+---------------------------------------------------------------+
-    | erfc(x)               | :math:`1 - \text{erf}(x)`                                     |
-    +-----------------------+---------------------------------------------------------------+
-    | exp(x)                | :math:`e^x`                                                   |
-    +-----------------------+---------------------------------------------------------------+
-    | floor(x)              | Largest integer value not larger than :math:`x`               |
-    +-----------------------+---------------------------------------------------------------+
-    | hypot(x,y)            | :math:`\sqrt{x^2 + y^2}`                                      |
-    +-----------------------+---------------------------------------------------------------+
-    | ln(x)                 | :math:`\ln x`                                                 |
-    +-----------------------+---------------------------------------------------------------+
-    | log(x)                | :math:`\log_{10} x`                                           |
-    +-----------------------+---------------------------------------------------------------+
-    | maxp(x,y)             | Takes the maximum between :math:`x` and :math:`y`             |
-    +-----------------------+---------------------------------------------------------------+
-    | minp(x,y)             | Takes the minimum between :math:`x` and :math:`y`             |
-    +-----------------------+---------------------------------------------------------------+
-    | mod(x,y)              | Gives the remainder of  :math:`x` divided by :math:`y`        |
-    +-----------------------+---------------------------------------------------------------+
-    | pi                    | Ratio of the circumference of a circle and its diameter.      |
-    +-----------------------+---------------------------------------------------------------+
-    | rand                  | Gives a vector of uniformly distributed random numbers        |
-    |                       | from 0 to 1.                                                  |
-    +-----------------------+---------------------------------------------------------------+
-    | rnorm(x,y)            | Gives a vector of Gaussian random numbers distributed as      |
-    |                       | :math:`N(x,y)`. Note that if :math:`x` and :math:`y` are      |
-    |                       | vectors, each number generated will have a different mean     |
-    |                       | and variance according to the arguments.                      |
-    +-----------------------+---------------------------------------------------------------+
-    | shift(x,s)            | Shifts the values in the input vector :math:`x` by the number |
-    |                       | of positions given by :math:`s`. Note that :math:`s` must be  |
-    |                       | a scalar.                                                     |
-    +-----------------------+---------------------------------------------------------------+
-    | sin(x)                | :math:`\sin x`                                                |
-    +-----------------------+---------------------------------------------------------------+
-    | sqr(x)                | :math:`x^2`                                                   |
-    +-----------------------+---------------------------------------------------------------+
-    | sqrt(x)               | :math:`\sqrt{x}`                                              |
-    +-----------------------+---------------------------------------------------------------+
-    | tan(x)                | :math:`\tan x`                                                |
-    +-----------------------+---------------------------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.4\linewidth}|p{0.4\linewidth}|
-.. _CHDDJEHH:
-.. table:: Examples of source waveform expressions.
-
-    +---------------------------------------------+-------------------------------------------------------------+
-    | Expression                                  | Meaning                                                     |
-    +---------------------------------------------+-------------------------------------------------------------+
-    | q = 20e-9*sin(2*pi*10*x)                    | A 10-Hz sine wave with 20 nAm amplitude                     |
-    +---------------------------------------------+-------------------------------------------------------------+
-    | q = 20e-9*sin(2*pi*2*x)*sin(2*pi*10*x)      | A 10-Hz 20-nAm sine wave, amplitude modulated               |
-    |                                             | sinusoidally at 2 Hz.                                       |
-    +---------------------------------------------+-------------------------------------------------------------+
-    | q = 20e-9*cosw(t,100,100,100)               | :math:`\cos^2`-shaped pulse, centered at :math:`t` = 100 ms |
-    |                                             | with 100 ms leading and trailing slopes, 20 nAm amplitude   |
-    +---------------------------------------------+-------------------------------------------------------------+
-    | q = 30e-9*(t > 0)*(t < 300)*sin(2*pi*20*x)  | 20-Hz sine wave, 30 nAm amplitude, cropped in time to       |
-    |                                             | 0...300 ms.                                                 |
-    +---------------------------------------------+-------------------------------------------------------------+
-
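-For example, a complete waveform expression file could combine a help
-variable with the functions above (an illustration)::
-
-  a = cosw(t,100,200,100)
-  y = 25e-9*a*sin(2*pi*15*x)
-
-This produces a 15-Hz oscillation with a 25 nAm peak amplitude,
-windowed by a :math:`\cos^2` -shaped envelope centered at 200 ms.
-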
-.. _CHDEDHCG:
-
-Converting parcellation data into labels
-########################################
-
-The utility mne_annot2labels converts
-cortical parcellation data into a set of labels. The parcellation
-data are read from the directory ``$SUBJECTS_DIR/$SUBJECT/label`` and
-the resulting labels are written to the current directory. mne_annot2labels requires
-that the environment variable ``$SUBJECTS_DIR`` is set.
-The command line options for mne_annot2labels are:
-
-**\---version**
-
-    Show the program version and compilation date.
-
-**\---help**
-
-    List the command-line options.
-
-**\---subject  <*name*>**
-
-    Specifies the name of the subject. If this option is not present
-    the ``$SUBJECT`` environment variable is consulted. If
-    the subject name cannot be determined, the program quits.
-
-**\---parc  <*name*>**
-
-    Specifies the parcellation name to convert. The corresponding parcellation
-    file names will be ``$SUBJECTS_DIR/$SUBJECT/label/``  <*hemi*> ``h.``  <*name*> ``.annot`` where  <*hemi*> is ``l`` or ``r`` for the
-    left and right hemisphere, respectively.
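-
-For example, the standard FreeSurfer ``aparc`` parcellation could be
-converted with::
-
-  mne_annot2labels --subject sample --parc aparc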
diff --git a/doc/source/mne-python.rst b/doc/source/mne-python.rst
deleted file mode 100644
index 1380044..0000000
--- a/doc/source/mne-python.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-.. _mne_python:
-
-======================
-MNE with Python
-======================
-
-.. toctree::
-   :maxdepth: 1
-
-   getting_started.rst
-   python_tutorial.rst
-   mne_report_tutorial.rst
-   auto_examples/index.rst
-   python_reference.rst
-   whats_new.rst
-   contributing.rst
-
-.. raw:: html
-
-    <div>
-    <div style="width: 40%; float: left; padding: 20px;">
-        <a class="twitter-timeline" href="https://twitter.com/mne_python" data-widget-id="317730454184804352">Tweets by @mne_python</a>
-    </div>
-    <div style="width: 40%; float: left; padding: 20px;">
-        <script type="text/javascript" src="http://www.ohloh.net/p/586838/widgets/project_basic_stats.js"></script>
-    </div>
-    </div>
diff --git a/doc/source/python_tutorial.rst b/doc/source/python_tutorial.rst
deleted file mode 100644
index 539ddde..0000000
--- a/doc/source/python_tutorial.rst
+++ /dev/null
@@ -1,396 +0,0 @@
-.. _mne_python_tutorial:
-
-=========================================================
-Tutorial: MEG and EEG data processing with MNE and Python
-=========================================================
-
-Python offers transparent scripting on top of MNE.
-It was designed to be an alternative to the MNE Matlab toolbox,
-but it can now do much more (customize events, compute
-contrasts, statistics, time-frequency analysis, etc.).
-It uses the same files as the standard MNE Unix commands:
-there is no need to convert your files to a new system or database.
-
-What you can do with MNE Python
--------------------------------
-
-    - **Raw data visualization** to visualize recordings; you can also use *mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
-    - **Epoching**: Define epochs, baseline correction, handle conditions etc.
-    - **Averaging** to get Evoked data
-    - **Compute SSP projectors** to remove ECG and EOG artifacts
-    - **Compute ICA** to remove artifacts or select latent sources.
-    - **Forward modeling**: BEM computation and mesh creation (see :ref:`ch_forward`)
-    - **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
-    - **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map, Time-Frequency MxNE)
-    - **Connectivity estimation** in sensor and source space
-    - **Visualization of sensor and source space data**
-    - **Time-frequency** analysis with Morlet wavelets (induced power, phase lock value) also in the source space
-    - **Spectrum estimation** using multi-taper method
-    - **Compute contrasts** between conditions, between sensors, across subjects etc.
-    - **Non-parametric statistics** in time, space and frequency (including cluster-level)
-    - **Scripting** (batch and parallel computing)
-
-What you're not supposed to do with MNE Python
-----------------------------------------------
-
-	- **Dipole fitting**: use MNE or other dedicated software instead.
-	- **Boundary Element Modeling**: use MNE and FreeSurfer.
-
-
-.. note:: The package is based on the FIF file format from Neuromag, but it can work with CTF and 4D data after conversion to FIF.
-
-
-Installation of the required materials
----------------------------------------
-
-See :ref:`getting_started` with Python.
-
-
-.. note:: The expected location for the MNE-sample data is my-path-to/mne-python/examples.
-    If you downloaded data and an example asks you whether to download it again, make sure
-    the data reside in the examples directory and you run the script from its current directory.
-
-    From IPython, e.g., say::
-
-        cd examples/preprocessing
-        %run plot_find_ecg_artifacts.py
-
-
-From raw data to evoked data
-----------------------------
-
-.. _ipython: http://ipython.scipy.org/
-
-Now, launch `ipython`_ (Advanced Python shell) using the QT backend, which is best supported across systems::
-
-  $ ipython -pylab -qt
-
-First, load the mne package:
-
-    >>> import mne
-
-If you'd like to turn information status messages off:
-
-    >>> mne.set_log_level('WARNING')
-
-But it's generally a good idea to leave them on:
-
-    >>> mne.set_log_level('INFO')
-
-You can set the default level by setting the environment variable
-"MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
-
-    >>> mne.set_config('MNE_LOGGING_LEVEL','WARNING') # doctest: +SKIP
-
-Note that the location of the mne-python preferences file (for easier manual
-editing) can be found using:
-
-    >>> mne.get_config_path() # doctest: +SKIP
-
-By default logging messages print to the console, but look at
-mne.set_log_file() to save output to a file.
-
-Access raw data
-^^^^^^^^^^^^^^^
-
-    >>> from mne.datasets import sample
-    >>> data_path = sample.data_path()
-    >>> raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-    >>> print(raw_fname) # doctest: +SKIP
-    ./MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw.fif
-
-.. note:: The MNE sample dataset should be downloaded automatically but be patient (approx. 2GB)
-
-Read data from file:
-
-    >>> raw = mne.io.Raw(raw_fname) # doctest:+ELLIPSIS
-    Opening raw data ...
-    Ready.
-    >>> print(raw)
-    <Raw  |  n_channels x n_times : 376 x 41700>
-    >>> print(raw.info) # doctest:+ELLIPSIS
-    <Info | 17 non-empty ...
-
-Look at the channels in raw:
-
-    >>> print(raw.ch_names) # doctest:+ELLIPSIS
-    ['MEG 0113', 'MEG 0112', ...]
-
-Read and plot a segment of raw data
-
-    >>> start, stop = raw.time_as_index([100, 115])  # 100 s to 115 s data segment
-    >>> data, times = raw[:, start:stop]
-    Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
-    [done]
-    >>> print(data.shape)
-    (376, 2252)
-    >>> print(times.shape)
-    (2252,)
-    >>> data, times = raw[2:20:3, start:stop]  # access underlying data
-    Reading 15015 ... 17266  =     99.998 ...   114.989 secs...
-    [done]
-    >>> raw.plot() # doctest: +SKIP
-
-.. figure:: _images/plot_read_and_write_raw_data.png
-    :alt: Raw data
-
-Save a segment of 150s of raw data (MEG only):
-
-    >>> picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, exclude='bads')
-    >>> raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks, overwrite=True) # doctest: +ELLIPSIS
-    Reading ...
-
-Define and read epochs
-^^^^^^^^^^^^^^^^^^^^^^
-
-First extract events:
-
-    >>> events = mne.find_events(raw, stim_channel='STI 014')
-    Reading 0 ... 41699  =      0.000 ...   277.709 secs...
-    [done]
-    319 events found
-    Events id: [ 1  2  3  4  5 32]
-    >>> print(events[:5])
-    [[6994    0    2]
-     [7086    0    3]
-     [7192    0    1]
-     [7304    0    4]
-     [7413    0    2]]
-
-Note that, by default, we use stim_channel='STI 014'. If you have a different
-system (e.g., a newer system that uses channel 'STI101' by default), you can
-use the following to set the default stim channel to use for finding events:
-
-    >>> mne.set_config('MNE_STIM_CHANNEL', 'STI101') # doctest: +SKIP
-
-Events are stored as a 2D numpy array where the first column is the sample
-index and the last one is the event id. This makes it easy to manipulate.
-
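-For example, keeping only the left-auditory events (event id 1) is a
-single indexing operation:
-
-    >>> events_aud_l = events[events[:, 2] == 1]
-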
-Define epochs parameters:
-
-    >>> event_id = dict(aud_l=1, aud_r=2)  # event trigger and conditions
-    >>> tmin = -0.2  # start of each epoch (200ms before the trigger)
-    >>> tmax = 0.5  # end of each epoch (500ms after the trigger)
-
-Exclude some channels (original bads + 2 more):
-
-    >>> raw.info['bads'] += ['MEG 2443', 'EEG 053']
-
-The variable raw.info['bads'] is just a python list.
-
-Pick the good channels, excluding raw.info['bads']:
-
-    >>> picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False, exclude='bads')
-
-Alternatively one can restrict to magnetometers or gradiometers with:
-
-    >>> mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
-    >>> grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
-
-Define the baseline period:
-
-    >>> baseline = (None, 0)  # means from the first instant to t = 0
-
-Define peak-to-peak rejection parameters for gradiometers, magnetometers and EOG:
-
-    >>> reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
-
-Read epochs:
-
-    >>> epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks, baseline=baseline, preload=False, reject=reject)
-    Created an SSP operator (subspace dimension = 4)
-    4 projection items activated
-    145 matching events found
-    >>> print(epochs)
-    <Epochs  |  n_events : 145 (good & bad), tmin : -0.2 (s), tmax : 0.5 (s), baseline : (None, 0),
-     'aud_l': 72, 'aud_r': 73>
-
-Get single epochs for one condition:
-
-    >>> epochs_data = epochs['aud_l'].get_data() # doctest: +ELLIPSIS
-    Reading ...
-    >>> print(epochs_data.shape)
-    (55, 365, 106)
-
-epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time instants).
-
-Scipy supports reading and writing of Matlab files. You can save your single trials with:
-
-    >>> from scipy import io
-    >>> io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
-
-or if you want to keep all the information about the data you can save your epochs
-in a fif file:
-
-    >>> epochs.save('sample-epo.fif') # doctest: +ELLIPSIS
-    Reading ...
-
-and read them later with:
-
-    >>> saved_epochs = mne.read_epochs('sample-epo.fif') # doctest: +ELLIPSIS
-    Reading ...
-
-Compute evoked responses for auditory responses by averaging and plot it:
-
-    >>> evoked = epochs['aud_l'].average() # doctest: +ELLIPSIS
-    Reading ...
-    >>> print(evoked)
-    <Evoked  |  comment : 'aud_l', time : [-0.199795, 0.499488], n_epochs : 55, n_channels x n_times : 364 x 106>
-    >>> evoked.plot() # doctest:+SKIP
-
-.. figure:: _images/plot_read_epochs.png
-    :alt: Evoked data
-
-.. topic:: Exercise
-
-  1. Extract the max value of each epoch
-
-  >>> max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
-  Reading ...
-  >>> print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
-  [1.93751...e-05, 1.64055...e-05, 1.85453...e-05, 2.04128...e-05]
-
-It is also possible to read evoked data stored in a fif file:
-
-    >>> evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
-    >>> evoked1 = mne.read_evokeds(evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
-    Reading .../MNE-sample-data/MEG/sample/sample_audvis-ave.fif ...
-        Read a total of 4 projection items:
-            PCA-v1 (1 x 102) active
-            PCA-v2 (1 x 102) active
-            PCA-v3 (1 x 102) active
-            Average EEG reference (1 x 60) active
-        Found the data of interest:
-            t =    -199.80 ...     499.49 ms (Left Auditory)
-            0 CTF compensation matrices available
-            nave = 55 - aspect type = 100
-    Projections have already been applied. Doing nothing.
-    Applying baseline correction ... (mode: mean)
-
-Or another one stored in the same file:
-
-    >>> evoked2 = mne.read_evokeds(evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True) # doctest: +ELLIPSIS
-    Reading ...
-
-Compute a contrast:
-
-    >>> contrast = evoked1 - evoked2
-
-    >>> print(contrast)
-    <Evoked  |  comment : 'Left Auditory - Right Auditory', time : [-0.199795, 0.499488], n_epochs : 116, n_channels x n_times : 376 x 421>
-
-Time-Frequency: Induced power and phase-locking values
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Define parameters:
-
-    >>> import numpy as np
-    >>> n_cycles = 2  # number of cycles in Morlet wavelet
-    >>> frequencies = np.arange(7, 30, 3)  # frequencies of interest
-    >>> Fs = raw.info['sfreq']  # sampling in Hz
-
-Compute induced power and phase-locking values:
-
-    >>> from mne.time_frequency import induced_power
-    >>> power, phase_lock = induced_power(epochs_data, Fs=Fs, frequencies=frequencies, n_cycles=2, n_jobs=1)
-
-.. figure:: _images/plot_time_frequency.png
-    :alt: Time-Frequency
-
-Inverse modeling: MNE and dSPM on evoked and raw data
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Import the required functions:
-
-    >>> from mne.minimum_norm import apply_inverse, read_inverse_operator
-
-Read the inverse operator:
-
-    >>> fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
-    >>> inverse_operator = read_inverse_operator(fname_inv) # doctest: +ELLIPSIS
-    Reading ...
-
-Define the inverse parameters:
-
-    >>> snr = 3.0
-    >>> lambda2 = 1.0 / snr ** 2
-    >>> method = "dSPM"
-
-Compute the inverse solution:
-
-    >>> stc = apply_inverse(evoked, inverse_operator, lambda2, method)
-    Preparing the inverse operator for use...
-        Scaled noise and source covariance from nave = 1 to nave = 55
-        Created the regularized inverter
-        Created an SSP operator (subspace dimension = 3)
-        Created the whitener using a full noise covariance matrix (3 small eigenvalues omitted)
-        Computing noise-normalization factors (dSPM)...
-    [done]
-    Picked 305 channels from the data
-    Computing inverse...
-    (eigenleads need to be weighted)...
-    combining the current components...
-    (dSPM)...
-    [done]
-
-Save the source time courses to disk:
-
-    >>> stc.save('mne_dSPM_inverse')
-    Writing STC to disk...
-    [done]
-
-Now, let's compute dSPM on a raw file within a label:
-
-    >>> fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
-    >>> label = mne.read_label(fname_label)
-
-Compute inverse solution during the first 15s:
-
-    >>> from mne.minimum_norm import apply_inverse_raw
-    >>> start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
-    >>> stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label, start, stop)
-    Preparing the inverse operator for use...
-        Scaled noise and source covariance from nave = 1 to nave = 1
-        Created the regularized inverter
-        Created an SSP operator (subspace dimension = 3)
-        Created the whitener using a full noise covariance matrix (3 small eigenvalues omitted)
-        Computing noise-normalization factors (dSPM)...
-    [done]
-    Picked 305 channels from the data
-    Computing inverse...
-    Reading 0 ... 2251  =      0.000 ...    14.991 secs...
-    [done]
-    (eigenleads need to be weighted)...
-    combining the current components...
-    [done]
-
-Save result in stc files:
-
-    >>> stc.save('mne_dSPM_raw_inverse_Aud')
-    Writing STC to disk...
-    [done]
-
-What else can you do?
-^^^^^^^^^^^^^^^^^^^^^
-
-    - detect heart beat QRS component
-    - detect eye blinks and EOG artifacts
-    - compute SSP projections to remove ECG or EOG artifacts
-    - compute Independent Component Analysis (ICA) to remove artifacts or select latent sources
-    - estimate noise covariance matrix from Raw and Epochs
-    - visualize cross-trial response dynamics using epochs images
-    - compute forward solutions
-    - estimate power in the source space
-    - estimate connectivity in sensor and source space
-    - morph stc from one brain to another for group studies
-    - compute mass univariate statistics based on custom contrasts
-    - visualize source estimates
-    - export raw, epochs, and evoked data to other python data analysis libraries, e.g., pandas and nitime
-
-
-Want to know more?
-^^^^^^^^^^^^^^^^^^^
-
-Browse :ref:`examples-index` gallery.
diff --git a/doc/source/this_project.inc b/doc/source/this_project.inc
deleted file mode 100644
index 23ade2e..0000000
--- a/doc/source/this_project.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-.. mne-python
-.. _mne-python: http://mne-tools.github.com/mne-python-intro
-.. _`mne-python GitHub`: http://github.com/mne-tools/mne-python
-.. _`mne-python sample dataset`: ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
-
diff --git a/doc/sphinxext/commands.py b/doc/sphinxext/commands.py
new file mode 100644
index 0000000..764f995
--- /dev/null
+++ b/doc/sphinxext/commands.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+import os
+import glob
+from os import path as op
+
+from mne.utils import run_subprocess
+
+
+def setup(app):
+    app.connect('builder-inited', generate_commands_rst)
+    # app.add_config_value('make_flow_diagram', True, 'html')
+
+
+def setup_module():
+    # HACK: Stop nosetests running setup() above
+    pass
+
+
+header = """
+
+.. _python_commands:
+
+Command line tools
+==================
+
+.. contents:: Contents
+   :local:
+   :depth: 1
+
+"""
+
+command_rst = """
+
+.. _%s:
+
+%s
+----------------------------------------------------------
+
+.. raw:: html
+
+   <div>
+   <pre>
+
+%s
+
+.. raw:: html
+
+   </pre>
+   </div>
+
+"""
+
+def generate_commands_rst(app):
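+    """Generate an RST page with the --help output of the mne_* commands."""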
+    out_dir = op.join(app.builder.outdir, 'generated')
+    out_fname = op.join(out_dir, 'commands.rst')
+
+    command_path = op.join(os.path.dirname(__file__), '..', '..', 'mne',
+                           'commands')
+    fnames = glob.glob(op.join(command_path, 'mne_*.py'))
+
+    with open(out_fname, 'w') as f:
+        f.write(header)
+        for fname in fnames:
+            cmd_name = op.basename(fname)[:-3]
+
+            output, _ = run_subprocess(['python', fname, '--help'])
+            f.write(command_rst % (cmd_name, cmd_name.replace('mne_', 'mne '), output))
+
+    print('Done')
+
+
+
+# This is useful for testing/iterating to see what the result looks like
+if __name__ == '__main__':
+    from mne.io.constants import Bunch
+    out_dir = op.abspath(op.join(op.dirname(__file__), '..'))
+    app = Bunch(builder=Bunch(outdir=out_dir))
+    generate_commands_rst(app)
diff --git a/doc/sphinxext/docscrape.py b/doc/sphinxext/docscrape.py
deleted file mode 100644
index a6d333e..0000000
--- a/doc/sphinxext/docscrape.py
+++ /dev/null
@@ -1,497 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self,docstring):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        summary = self._doc.read_to_next_empty_line()
-        summary_str = " ".join([s.strip() for s in summary]).strip()
-        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-            self['Signature'] = summary_str
-            if not self._is_at_section():
-                self['Summary'] = self._doc.read_to_next_empty_line()
-        else:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Attributes', 'Methods',
-                           'Returns', 'Raises', 'Warns'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                out += ['%s : %s' % (param, param_type)]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters','Returns','Raises'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func', doc=None):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-        if doc is None:
-            doc = inspect.getdoc(func) or ''
-        try:
-            NumpyDocString.__init__(self, doc)
-        except ValueError, e:
-            print '*'*78
-            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
-            print '*'*78
-            #print "Docstring follows:"
-            #print doclines
-            #print '='*78
-
-        if not self['Signature']:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if not roles.has_key(self._role):
-                print "Warning: invalid role %s" % self._role
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-    def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
-        if not inspect.isclass(cls):
-            raise ValueError("Initialise using a class. Got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-        self._name = cls.__name__
-        self._func_doc = func_doc
-
-        if doc is None:
-            doc = pydoc.getdoc(cls)
-
-        NumpyDocString.__init__(self, doc)
-
-    @property
-    def methods(self):
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and callable(func)]
-
-    def __str__(self):
-        out = ''
-        out += super(ClassDoc, self).__str__()
-        out += "\n\n"
-
-        #for m in self.methods:
-        #    print "Parsing `%s`" % m
-        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
-        #    out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
-
-        return out
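Context for the deletion above: docscrape.py provided NumpyDocString, which
parses a numpydoc-formatted docstring into addressable sections and can
re-render it as reST. A minimal sketch of how it was used, assuming the module
is importable as `docscrape`; the example docstring is invented:

    from docscrape import NumpyDocString

    doc = NumpyDocString("""
    Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.

    Returns
    -------
    int
        The sum of a and b.
    """)
    # sections are exposed dict-style; parameters come back as
    # (name, type, description-lines) tuples
    print(doc['Parameters'][0])  # ('a', 'int', ['First operand.'])
    print(str(doc))              # re-renders the parsed docstring as reST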
diff --git a/doc/sphinxext/docscrape_sphinx.py b/doc/sphinxext/docscrape_sphinx.py
deleted file mode 100644
index eda6c35..0000000
--- a/doc/sphinxext/docscrape_sphinx.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-import re, inspect, textwrap, pydoc
-from docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-class SphinxDocString(NumpyDocString):
-    # string conversion routines
-    def _str_header(self, name, symbol='`'):
-        return ['.. rubric:: ' + name, '']
-
-    def _str_field_list(self, name):
-        return [':' + name + ':']
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        return ['']
-        if self['Signature']:
-            return ['``%s``' % self['Signature']] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        return self['Summary'] + ['']
-
-    def _str_extended_summary(self):
-        return self['Extended Summary'] + ['']
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_field_list(name)
-            out += ['']
-            for param,param_type,desc in self[name]:
-                out += self._str_indent(['**%s** : %s' % (param.strip(),
-                                                          param_type)])
-                out += ['']
-                out += self._str_indent(desc,8)
-                out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += ['']
-            content = textwrap.dedent("\n".join(self[name])).split("\n")
-            out += content
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        out = []
-        if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
-            out = ['.. seealso::', '']
-            out += self._str_indent(see_also[2:])
-        return out
-
-    def _str_warnings(self):
-        out = []
-        if self['Warnings']:
-            out = ['.. warning::', '']
-            out += self._str_indent(self['Warnings'])
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        if len(idx) == 0:
-            return out
-
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            elif section == 'refguide':
-                out += ['   single: %s' % (', '.join(references))]
-            else:
-                out += ['   %s: %s' % (section, ','.join(references))]
-        return out
-
-    def _str_references(self):
-        out = []
-        if self['References']:
-            out += self._str_header('References')
-            if isinstance(self['References'], str):
-                self['References'] = [self['References']]
-            out.extend(self['References'])
-            out += ['']
-        return out
-
-    def __str__(self, indent=0, func_role="obj"):
-        out = []
-        out += self._str_signature()
-        out += self._str_index() + ['']
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Attributes', 'Methods',
-                           'Returns','Raises'):
-            out += self._str_param_list(param_list)
-        out += self._str_warnings()
-        out += self._str_see_also(func_role)
-        out += self._str_section('Notes')
-        out += self._str_references()
-        out += self._str_section('Examples')
-        out = self._str_indent(out,indent)
-        return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
-    pass
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
-    pass
-
-def get_doc_object(obj, what=None, doc=None):
-    if what is None:
-        if inspect.isclass(obj):
-            what = 'class'
-        elif inspect.ismodule(obj):
-            what = 'module'
-        elif callable(obj):
-            what = 'function'
-        else:
-            what = 'object'
-    if what == 'class':
-        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
-    elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, '', doc=doc)
-    else:
-        if doc is None:
-            doc = pydoc.getdoc(obj)
-        return SphinxDocString(doc)
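For Sphinx builds, the deleted docscrape_sphinx.py wrapped the same parser and
emitted Sphinx-flavoured reST (field lists, .. seealso::, .. warning::);
get_doc_object picked the right wrapper from the object's type. A minimal
sketch, assuming the module is importable as `docscrape_sphinx`; the function
and its docstring are invented for illustration:

    from docscrape_sphinx import get_doc_object

    def area(r):
        """Compute the area of a circle.

        Parameters
        ----------
        r : float
            Radius of the circle.

        Returns
        -------
        float
            The computed area.
        """
        return 3.14159 * r ** 2

    # a plain callable maps to SphinxFunctionDoc; str() yields the reST
    print(str(get_doc_object(area)))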
diff --git a/doc/sphinxext/flow_diagram.py b/doc/sphinxext/flow_diagram.py
new file mode 100644
index 0000000..06a43bd
--- /dev/null
+++ b/doc/sphinxext/flow_diagram.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+import os
+from os import path as op
+
+title = 'mne-python flow diagram'
+
+font_face = 'Arial'
+node_size = 12
+node_small_size = 9
+edge_size = 9
+sensor_color = '#7bbeca'
+source_color = '#ff6347'
+
+legend = """
+<<FONT POINT-SIZE="%s">
+<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="4" CELLPADDING="4">
+<TR><TD BGCOLOR="%s">    </TD><TD ALIGN="left">
+Sensor (M/EEG) space</TD></TR>
+<TR><TD BGCOLOR="%s">    </TD><TD ALIGN="left">
+Source (brain) space</TD></TR>
+</TABLE></FONT>>""" % (edge_size, sensor_color, source_color)
+legend = ''.join(legend.split('\n'))
+
+nodes = dict(
+    T1='T1',
+    flashes='Flash5/30',
+    trans='Head-MRI trans',
+    recon='Freesurfer surfaces',
+    bem='BEM',
+    src='Source space\nmne.SourceSpaces',
+    cov='Noise covariance\nmne.Covariance',
+    fwd='Forward solution\nmne.forward.Forward',
+    inv='Inverse operator\nmne.minimum_norm.InverseOperator',
+    stc='Source estimate\nmne.SourceEstimate',
+    raw='Raw data\nmne.io.Raw',
+    epo='Epoched data\nmne.Epochs',
+    evo='Averaged data\nmne.Evoked',
+    pre='Preprocessed data\nmne.io.Raw',
+    legend=legend,
+)
+
+sensor_space = ('raw', 'pre', 'epo', 'evo', 'cov')
+source_space = ('src', 'stc', 'bem', 'flashes', 'recon', 'T1')
+
+edges = (
+    ('T1', 'recon'),
+    ('flashes', 'bem'),
+    ('recon', 'bem'),
+    ('recon', 'src', 'mne.setup_source_space'),
+    ('src', 'fwd'),
+    ('bem', 'fwd'),
+    ('trans', 'fwd', 'mne.make_forward_solution'),
+    ('fwd', 'inv'),
+    ('cov', 'inv', 'mne.make_inverse_operator'),
+    ('inv', 'stc'),
+    ('evo', 'stc', 'mne.minimum_norm.apply_inverse'),
+    ('raw', 'pre', 'raw.filter\n'
+                   'mne.preprocessing.ICA\n'
+                   'mne.preprocessing.compute_proj_eog\n'
+                   'mne.preprocessing.compute_proj_ecg\n'
+                   '...'),
+    ('pre', 'epo', 'mne.Epochs'),
+    ('epo', 'evo', 'epochs.average'),
+    ('epo', 'cov', 'mne.compute_covariance'),
+)
+
+subgraphs = (
+    [('T1', 'flashes', 'recon', 'bem', 'src'),
+     ('<Structural information<BR/><FONT POINT-SIZE="%s"><I>'
+      'Freesurfer / MNE-C</I></FONT>>' % node_small_size)],
+)
+
+
+def setup(app):
+    app.connect('builder-inited', generate_flow_diagram)
+    app.add_config_value('make_flow_diagram', True, 'html')
+
+
+def setup_module():
+    # HACK: Stop nosetests running setup() above
+    pass
+
+
+def generate_flow_diagram(app):
+    out_dir = op.join(app.builder.outdir, '_static')
+    if not op.isdir(out_dir):
+        os.makedirs(out_dir)
+    out_fname = op.join(out_dir, 'mne-python_flow.svg')
+    make_flow_diagram = app is None or \
+        bool(app.builder.config.make_flow_diagram)
+    if not make_flow_diagram:
+        print('Skipping flow diagram, webpage will have a missing image')
+        return
+
+    import pygraphviz as pgv
+    g = pgv.AGraph(name=title, directed=True)
+
+    for key, label in nodes.items():
+        label = label.split('\n')
+        if len(label) > 1:
+            label[0] = ('<<FONT POINT-SIZE="%s">' % node_size
+                        + label[0] + '</FONT>')
+            for li in range(1, len(label)):
+                label[li] = ('<FONT POINT-SIZE="%s"><I>' % node_small_size
+                             + label[li] + '</I></FONT>')
+            label[-1] = label[-1] + '>'
+            label = '<BR/>'.join(label)
+        else:
+            label = label[0]
+        g.add_node(key, shape='plaintext', label=label)
+
+    # Create and customize nodes and edges
+    for edge in edges:
+        g.add_edge(*edge[:2])
+        e = g.get_edge(*edge[:2])
+        if len(edge) > 2:
+            e.attr['label'] = ('<<I>' +
+                               '<BR ALIGN="LEFT"/>'.join(edge[2].split('\n')) +
+                               '<BR ALIGN="LEFT"/></I>>')
+        e.attr['fontsize'] = edge_size
+
+    # Change colors
+    for these_nodes, color in zip((sensor_space, source_space),
+                                  (sensor_color, source_color)):
+        for node in these_nodes:
+            g.get_node(node).attr['fillcolor'] = color
+            g.get_node(node).attr['style'] = 'filled'
+
+    # Create subgraphs
+    for si, subgraph in enumerate(subgraphs):
+        g.add_subgraph(subgraph[0], 'cluster%s' % si,
+                       label=subgraph[1], color='black')
+
+    # Format (sub)graphs
+    for gr in g.subgraphs() + [g]:
+        for x in [gr.node_attr, gr.edge_attr]:
+            x['fontname'] = font_face
+    g.node_attr['shape'] = 'box'
+
+    # A couple of special ones
+    for ni, node in enumerate(('fwd', 'inv', 'trans')):
+        node = g.get_node(node)
+        node.attr['gradientangle'] = 270
+        colors = (source_color, sensor_color)
+        colors = colors if ni == 0 else colors[::-1]
+        node.attr['fillcolor'] = ':'.join(colors)
+        node.attr['style'] = 'filled'
+    del node
+    g.get_node('legend').attr.update(shape='plaintext', margin=0, rank='sink')
+    # put legend in same rank/level as inverse
+    l = g.add_subgraph(['legend', 'inv'], name='legendy')
+    l.graph_attr['rank'] = 'same'
+
+    g.layout('dot')
+    g.draw(out_fname, format='svg')
+    return g
+
+
+# This is useful for testing/iterating to see what the result looks like
+if __name__ == '__main__':
+    from mne.io.constants import Bunch
+    out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'build', 'html'))
+    app = Bunch(builder=Bunch(outdir=out_dir,
+                              config=Bunch(make_flow_diagram=True)))
+    g = generate_flow_diagram(app)
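The new flow_diagram.py relies on two pygraphviz features: HTML-like labels
(strings wrapped in << ... >>) and filled, colour-coded nodes. A minimal
sketch of the same technique, cut down to a single edge (the output file name
and colour here are placeholders):

    import pygraphviz as pgv

    g = pgv.AGraph(directed=True)
    for key, text in (('raw', 'Raw data'), ('epo', 'Epoched data')):
        # HTML-like label, as in the nodes dict above
        g.add_node(key, shape='box', style='filled', fillcolor='#7bbeca',
                   label='<<FONT POINT-SIZE="12">%s</FONT>>' % text)
    g.add_edge('raw', 'epo')
    g.get_edge('raw', 'epo').attr['label'] = '<<I>mne.Epochs</I>>'
    g.layout('dot')                       # compute positions with dot
    g.draw('flow_min.svg', format='svg')  # write the rendered diagram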
diff --git a/doc/sphinxext/gen_rst.py b/doc/sphinxext/gen_rst.py
deleted file mode 100644
index e204334..0000000
--- a/doc/sphinxext/gen_rst.py
+++ /dev/null
@@ -1,946 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8
-
-"""
-Example generation, modified from scikit-learn.
-
-Generate the rst files for the examples by iterating over the python
-example files.
-
-Files that generate images should start with 'plot'
-
-"""
-from time import time
-import os
-import shutil
-import traceback
-import glob
-import sys
-from StringIO import StringIO
-import cPickle
-import re
-import urllib2
-import gzip
-import posixpath
-
-try:
-    from PIL import Image
-except:
-    import Image
-
-import matplotlib
-matplotlib.use('Agg')
-
-import token
-import tokenize
-
-MAX_NB_LINES_STDOUT = 20
-
-###############################################################################
-# A tee object to redirect streams to multiple outputs
-
-
-class Tee(object):
-
-    def __init__(self, file1, file2):
-        self.file1 = file1
-        self.file2 = file2
-
-    def write(self, data):
-        self.file1.write(data)
-        self.file2.write(data)
-
-    def flush(self):
-        self.file1.flush()
-        self.file2.flush()
-
-###############################################################################
-# Documentation link resolver objects
-
-
-def get_data(url):
-    """Helper function to get data over http or from a local file"""
-    if url.startswith('http://'):
-        try:
-            resp = urllib2.urlopen(url)
-            encoding = resp.headers.dict.get('content-encoding', 'plain')
-            data = resp.read()
-            if encoding == 'plain':
-                pass
-            elif encoding == 'gzip':
-                data = StringIO(data)
-                data = gzip.GzipFile(fileobj=data).read()
-            else:
-                raise RuntimeError('unknown encoding')
-        except urllib2.HTTPError as err:
-            print 'Error downloading %s: %s' % (url, str(err))
-            return ''
-    else:
-        with open(url, 'r') as fid:
-            data = fid.read()
-        fid.close()
-
-    return data
-
-
-def parse_sphinx_searchindex(searchindex):
-    """Parse a Sphinx search index
-
-    Parameters
-    ----------
-    searchindex : str
-        The Sphinx search index (contents of searchindex.js)
-
-    Returns
-    -------
-    filenames : list of str
-        The file names parsed from the search index.
-    objects : dict
-        The objects parsed from the search index.
-    """
-    def _select_block(str_in, start_tag, end_tag):
-        """Select first block delimited by start_tag and end_tag"""
-        start_pos = str_in.find(start_tag)
-        if start_pos < 0:
-            raise ValueError('start_tag not found')
-        depth = 0
-        for pos in range(start_pos, len(str_in)):
-            if str_in[pos] == start_tag:
-                depth += 1
-            elif str_in[pos] == end_tag:
-                depth -= 1
-
-            if depth == 0:
-                break
-        sel = str_in[start_pos + 1:pos]
-        return sel
-
-    def _parse_dict_recursive(dict_str):
-        """Parse a dictionary from the search index"""
-        dict_out = dict()
-        pos_last = 0
-        pos = dict_str.find(':')
-        while pos >= 0:
-            key = dict_str[pos_last:pos]
-            if dict_str[pos + 1] == '[':
-                # value is a list
-                pos_tmp = dict_str.find(']', pos + 1)
-                if pos_tmp < 0:
-                    raise RuntimeError('error when parsing dict')
-                value = dict_str[pos + 2: pos_tmp].split(',')
-                # try to convert elements to int
-                for i in range(len(value)):
-                    try:
-                        value[i] = int(value[i])
-                    except ValueError:
-                        pass
-            elif dict_str[pos + 1] == '{':
-                # value is another dictionary
-                subdict_str = _select_block(dict_str[pos:], '{', '}')
-                value = _parse_dict_recursive(subdict_str)
-                pos_tmp = pos + len(subdict_str)
-            else:
-                raise ValueError('error when parsing dict: unknown elem')
-
-            key = key.strip('"')
-            if len(key) > 0:
-                dict_out[key] = value
-
-            pos_last = dict_str.find(',', pos_tmp)
-            if pos_last < 0:
-                break
-            pos_last += 1
-            pos = dict_str.find(':', pos_last)
-
-        return dict_out
-
-    # parse objects
-    query = 'objects:'
-    pos = searchindex.find(query)
-    if pos < 0:
-        raise ValueError('"objects:" not found in search index')
-
-    sel = _select_block(searchindex[pos:], '{', '}')
-    objects = _parse_dict_recursive(sel)
-
-    # parse filenames
-    query = 'filenames:'
-    pos = searchindex.find(query)
-    if pos < 0:
-        raise ValueError('"filenames:" not found in search index')
-    filenames = searchindex[pos + len(query) + 1:]
-    filenames = filenames[:filenames.find(']')]
-    filenames = [f.strip('"') for f in filenames.split(',')]
-
-    return filenames, objects
-
-
-class SphinxDocLinkResolver(object):
-    """ Resolve documentation links using searchindex.js generated by Sphinx
-
-    Parameters
-    ----------
-    doc_url : str
-        The base URL of the project website.
-    searchindex : str
-        Filename of searchindex, relative to doc_url.
-    extra_modules_test : list of str
-        List of extra module names to test.
-    relative : bool
-        Return relative links (only useful for links to documentation of this
-        package).
-    """
-
-    def __init__(self, doc_url, searchindex='searchindex.js',
-                 extra_modules_test=[], relative=False):
-        self.doc_url = doc_url
-        self.relative = relative
-        self._link_cache = {}
-
-        self.extra_modules_test = extra_modules_test
-        self._page_cache = {}
-        if doc_url.startswith('http://'):
-            if relative:
-                raise ValueError('Relative links are only supported for local '
-                                 'URLs (doc_url cannot start with "http://")')
-            searchindex_url = doc_url + '/' + searchindex
-        else:
-            searchindex_url = os.path.join(doc_url, searchindex)
-
-        # detect if we are using relative links on a Windows system
-        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
-            if not relative:
-                raise ValueError('You have to use relative=True for the local '
-                                 'package on a Windows system.')
-            self._is_windows = True
-        else:
-            self._is_windows = False
-
-        # download and initialize the search index
-        sindex = get_data(searchindex_url)
-        filenames, objects = parse_sphinx_searchindex(sindex)
-
-        self._searchindex = dict(filenames=filenames, objects=objects)
-
-    def _get_link(self, cobj):
-        """Get a valid link, False if not found"""
-
-        fname_idx = None
-        modules_test = [cobj['module_short']] + self.extra_modules_test
-
-        for module in modules_test:
-            full_name = module + '.' + cobj['name']
-            if full_name in self._searchindex['objects']:
-                value = self._searchindex['objects'][full_name]
-                if isinstance(value, dict):
-                    value = value[value.keys()[0]]
-                fname_idx = value[0]
-            elif module in self._searchindex['objects']:
-                value = self._searchindex['objects'][module]
-                if cobj['name'] in value.keys():
-                    fname_idx = value[cobj['name']][0]
-            if fname_idx is not None:
-                break
-
-        if fname_idx is not None:
-            fname = self._searchindex['filenames'][fname_idx] + '.html'
-
-            if self._is_windows:
-                fname = fname.replace('/', '\\')
-                link = os.path.join(self.doc_url, fname)
-            else:
-                link = posixpath.join(self.doc_url, fname)
-
-            if link in self._page_cache:
-                html = self._page_cache[link]
-            else:
-                html = get_data(link)
-                self._page_cache[link] = html
-
-            # test if cobj appears in page
-            url = False
-            for comb_name in ['%s.%s' % (module, cobj['name']) for module
-                              in modules_test]:
-                if html.find(comb_name) >= 0:
-                    url = link + '#' + comb_name
-            link = url
-        else:
-            link = False
-
-        return link
-
-    def resolve(self, cobj, this_url):
-        """Resolve the link to the documentation, returns None if not found
-
-        Parameters
-        ----------
-        cobj : dict
-            Dict with information about the "code object" for which we are
-            resolving a link.
-            cobj['name'] : function or class name (str)
-            cobj['module_short'] : shortened module name (str)
-            cobj['module'] : module name (str)
-        this_url: str
-            URL of the current page. Needed to construct relative URLs
-            (only used if relative=True in constructor).
-
-        Returns
-        -------
-        link : str | None
-            The link (URL) to the documentation.
-        """
-        full_name = cobj['module_short'] + '.' + cobj['name']
-        link = self._link_cache.get(full_name, None)
-        if link is None:
-            # we don't have it cached
-            link = self._get_link(cobj)
-            # cache it for the future
-            self._link_cache[full_name] = link
-
-        if link is False or link is None:
-            # failed to resolve
-            return None
-
-        if self.relative:
-            link = os.path.relpath(link, start=this_url)
-            if self._is_windows:
-                # replace '\' with '/' so it works on the web
-                link = link.replace('\\', '/')
-
-            # for some reason, the relative link goes one directory too high up
-            link = link[3:]
-
-        return link
-
-
-###############################################################################
-rst_template = """
-
-.. _example_%(short_fname)s:
-
-%(docstring)s
-
-**Python source code:** :download:`%(fname)s <%(fname)s>`
-
-.. literalinclude:: %(fname)s
-    :lines: %(end_row)s-
-    """
-
-plot_rst_template = """
-
-.. _example_%(short_fname)s:
-
-%(docstring)s
-
-%(image_list)s
-
-%(stdout)s
-
-**Python source code:** :download:`%(fname)s <%(fname)s>`
-
-.. literalinclude:: %(fname)s
-    :lines: %(end_row)s-
-
-**Total running time of the example:** %(time_elapsed) 4i seconds
-
-.. raw:: html
-
-    <div class="social-button-container">
-        <div class="social-button">
-            <a href="https://twitter.com/share" class="twitter-share-button">Tweet</a>
-        </div>
-        <div class="social-button">
-            <g:plusone annotation="inline" width="120" size="medium"></g:plusone>
-        </div>
-        <div class="social-button">
-            <div id="fb-root"></div>
-            <script>(function(d, s, id) {
-                var js, fjs = d.getElementsByTagName(s)[0];
-                if (d.getElementById(id)) return;
-                js = d.createElement(s); js.id = id;
-                js.src = "//connect.facebook.net/en_US/all.js#xfbml=1";
-                fjs.parentNode.insertBefore(js, fjs);
-                }(document, 'script', 'facebook-jssdk'));
-            </script>
-            <div class="fb-like" data-send="false" data-width="450" data-show-faces="false"></div>
-        </div>
-    </div>
-    """
-
-# The following strings are used when we have several pictures: we use
-# an html div tag that our CSS uses to turn the lists into horizontal
-# lists.
-HLIST_HEADER = """
-.. rst-class:: horizontal
-
-"""
-
-HLIST_IMAGE_TEMPLATE = """
-    *
-
-      .. image:: images/%s
-            :scale: 47
-"""
-
-SINGLE_IMAGE = """
-.. image:: images/%s
-    :align: center
-"""
-
-
-def extract_docstring(filename):
-    """ Extract a module-level docstring, if any
-    """
-    lines = file(filename).readlines()
-    start_row = 0
-    if lines[0].startswith('#!'):
-        lines.pop(0)
-        start_row = 1
-
-    docstring = ''
-    first_par = ''
-    tokens = tokenize.generate_tokens(iter(lines).next)
-    for tok_type, tok_content, _, (erow, _), _ in tokens:
-        tok_type = token.tok_name[tok_type]
-        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
-            continue
-        elif tok_type == 'STRING':
-            docstring = eval(tok_content)
-            # If the docstring is formatted with several paragraphs, extract
-            # the first one:
-            paragraphs = '\n'.join(line.rstrip() for line in
-                                   docstring.split('\n')).split('\n\n')
-            if len(paragraphs) > 0:
-                first_par = paragraphs[0]
-        break
-    return docstring, first_par, erow + 1 + start_row
-
-
-def generate_example_rst(app):
-    """ Generate the list of examples, as well as the contents of
-        examples.
-    """
-    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
-    example_dir = os.path.abspath(app.builder.srcdir + '/../../' + 'examples')
-    try:
-        plot_gallery = eval(app.builder.config.plot_gallery)
-    except TypeError:
-        plot_gallery = bool(app.builder.config.plot_gallery)
-    if not os.path.exists(example_dir):
-        os.makedirs(example_dir)
-    if not os.path.exists(root_dir):
-        os.makedirs(root_dir)
-
-    # we create an index.rst with all examples
-    fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
-    #Note: The sidebar button has been removed from the examples page for now
-    #      due to how it messes up the layout. Will be fixed at a later point
-    fhindex.write("""\
-
-.. raw:: html
-
-
-    <style type="text/css">
-
-    div#sidebarbutton {
-        display: none;
-    }
-
-    .figure {
-        float: left;
-        margin: 10px;
-        width: auto;
-        height: 200px;
-        width: 180px;
-    }
-
-    .figure img {
-        display: inline;
-        }
-
-    .figure .caption {
-        width: 180px;
-        text-align: center !important;
-    }
-    </style>
-
-Examples
-========
-
-.. _examples-index:
-""")
-    # Here we don't use an os.walk, but we recurse only twice: flat is
-    # better than nested.
-    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
-    for dir in sorted(os.listdir(example_dir)):
-        if os.path.isdir(os.path.join(example_dir, dir)):
-            generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
-    fhindex.flush()
-
-
-def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
-    """ Generate the rst file for an example directory.
-    """
-    if not dir == '.':
-        target_dir = os.path.join(root_dir, dir)
-        src_dir = os.path.join(example_dir, dir)
-    else:
-        target_dir = root_dir
-        src_dir = example_dir
-    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
-        print 80 * '_'
-        print ('Example directory %s does not have a README.txt file'
-               % src_dir)
-        print 'Skipping this directory'
-        print 80 * '_'
-        return
-    fhindex.write("""
-
-
-%s
-
-
-""" % file(os.path.join(src_dir, 'README.txt')).read())
-    if not os.path.exists(target_dir):
-        os.makedirs(target_dir)
-
-    def sort_key(a):
-        # put last elements without a plot
-        if not a.startswith('plot') and a.endswith('.py'):
-            return 'zz' + a
-        return a
-    for fname in sorted(os.listdir(src_dir), key=sort_key):
-        if not os.path.split(fname)[-1].startswith('plot_'):
-            continue
-        if fname.endswith('py'):
-            generate_file_rst(fname, target_dir, src_dir, plot_gallery)
-            thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
-            link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
-            fhindex.write('.. figure:: %s\n' % thumb)
-            if link_name.startswith('._'):
-                link_name = link_name[2:]
-            if dir != '.':
-                fhindex.write('   :target: ./%s/%s.html\n\n' % (dir,
-                                                                fname[:-3]))
-            else:
-                fhindex.write('   :target: ./%s.html\n\n' % link_name[:-3])
-            fhindex.write("""   :ref:`example_%s`
-
-.. toctree::
-   :hidden:
-
-   %s/%s
-
-""" % (link_name, dir, fname[:-3]))
-    fhindex.write("""
-.. raw:: html
-
-    <div style="clear: both"></div>
-    """)  # clear at the end of the section
-
-# modules for which we embed links into example code
-DOCMODULES = ['mne', 'matplotlib', 'numpy', 'scipy', 'mayavi']
-
-
-def make_thumbnail(in_fname, out_fname, width, height):
-    """Make a thumbnail with the same aspect ratio centered in an
-       image with a given width and height
-    """
-    img = Image.open(in_fname)
-    width_in, height_in = img.size
-    scale_w = width / float(width_in)
-    scale_h = height / float(height_in)
-
-    if height_in * scale_w <= height:
-        scale = scale_w
-    else:
-        scale = scale_h
-
-    width_sc = int(round(scale * width_in))
-    height_sc = int(round(scale * height_in))
-
-    # resize the image
-    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
-
-    # insert centered
-    thumb = Image.new('RGB', (width, height), (255, 255, 255))
-    pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
-    thumb.paste(img, pos_insert)
-
-    thumb.save(out_fname)
-
-
-def scale_image(in_fname, max_width):
-    """Scale image such that width <= max_width
-    """
-    img = Image.open(in_fname)
-    width_in, height_in = img.size
-
-    if width_in <= max_width:
-        return
-
-    scale = max_width / float(width_in)
-
-    width_sc = int(round(scale * width_in))
-    height_sc = int(round(scale * height_in))
-
-    # resize the image
-    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
-
-    # overwrite the image
-    img.save(in_fname)
-
-
-def get_short_module_name(module_name, obj_name):
-    """ Get the shortest possible module name """
-    parts = module_name.split('.')
-    short_name = module_name
-    for i in range(len(parts) - 1, 0, -1):
-        short_name = '.'.join(parts[:i])
-        try:
-            exec('from %s import %s' % (short_name, obj_name))
-        except ImportError:
-            # get the last working module name
-            short_name = '.'.join(parts[:(i + 1)])
-            break
-    return short_name
-
-
-def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
-    """ Generate the rst file for a given example.
-    """
-    base_image_name = os.path.splitext(fname)[0]
-    image_fname = '%s_%%s.png' % base_image_name
-
-    this_template = rst_template
-    last_dir = os.path.split(src_dir)[-1]
-    # to avoid leading . in file names, and wrong names in links
-    if last_dir == '.' or last_dir == 'examples':
-        last_dir = ''
-    else:
-        last_dir += '_'
-    short_fname = last_dir + fname
-    src_file = os.path.join(src_dir, fname)
-    example_file = os.path.join(target_dir, fname)
-    shutil.copyfile(src_file, example_file)
-
-    # The following is a list containing all the figure names
-    figure_list = []
-
-    image_dir = os.path.join(target_dir, 'images')
-    thumb_dir = os.path.join(image_dir, 'thumb')
-    if not os.path.exists(image_dir):
-        os.makedirs(image_dir)
-    if not os.path.exists(thumb_dir):
-        os.makedirs(thumb_dir)
-    image_path = os.path.join(image_dir, image_fname)
-    stdout_path = os.path.join(image_dir,
-                               'stdout_%s.txt' % base_image_name)
-    time_path = os.path.join(image_dir,
-                             'time_%s.txt' % base_image_name)
-    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
-    time_elapsed = 0
-    if plot_gallery:
-        # generate the plot as a png image if the file name
-        # starts with 'plot' and if the source is more recent
-        # than any existing image.
-        first_image_file = image_path % 1
-        if os.path.exists(stdout_path):
-            stdout = open(stdout_path).read()
-        else:
-            stdout = ''
-        if os.path.exists(time_path):
-            time_elapsed = float(open(time_path).read())
-
-        if (not os.path.exists(first_image_file) or
-                os.stat(first_image_file).st_mtime
-                <= os.stat(src_file).st_mtime):
-            # We need to execute the code
-            print 'plotting %s' % fname
-            t0 = time()
-            import matplotlib.pyplot as plt
-            plt.close('all')
-
-            try:
-                from mayavi import mlab
-            except Exception, e:
-                from enthought.mayavi import mlab
-            mlab.close(all=True)
-
-            cwd = os.getcwd()
-            try:
-                # First CD in the original example dir, so that any file
-                # created by the example get created in this directory
-                orig_stdout = sys.stdout
-                os.chdir(os.path.dirname(src_file))
-                my_buffer = StringIO()
-                my_stdout = Tee(sys.stdout, my_buffer)
-                sys.stdout = my_stdout
-                my_globals = {'pl': plt}
-                execfile(os.path.basename(src_file), my_globals)
-                time_elapsed = time() - t0
-                sys.stdout = orig_stdout
-                my_stdout = my_buffer.getvalue()
-
-                # get variables so we can later add links to the documentation
-                example_code_obj = {}
-                for var_name, var in my_globals.iteritems():
-                    if not hasattr(var, '__module__'):
-                        continue
-                    if not isinstance(var.__module__, basestring):
-                        continue
-                    if var.__module__.split('.')[0] not in DOCMODULES:
-                        continue
-
-                    # get the type as a string with other things stripped
-                    tstr = str(type(var))
-                    tstr = (tstr[tstr.find('\'')
-                            + 1:tstr.rfind('\'')].split('.')[-1])
-                    # get shortened module name
-                    module_short = get_short_module_name(var.__module__,
-                                                         tstr)
-                    cobj = {'name': tstr, 'module': var.__module__,
-                            'module_short': module_short,
-                            'obj_type': 'object'}
-                    example_code_obj[var_name] = cobj
-
-                # find functions so we can later add links to the documentation
-                funregex = re.compile('[\w.]+\(')
-                fun_exclude = ['print']
-                with open(src_file, 'rt') as fid:
-                    for line in fid.readlines():
-                        if line.startswith('#'):
-                            continue
-                        for match in funregex.findall(line):
-                            fun_name = match[:-1]
-                            if fun_name in fun_exclude:
-                                continue
-                            try:
-                                exec('this_fun = %s' % fun_name, my_globals)
-                            except Exception as err:
-                                print ('Error: extracting function %s failed: '
-                                       '%s' % (fun_name, str(err)))
-                                continue
-                            this_fun = my_globals['this_fun']
-                            if not callable(this_fun):
-                                continue
-                            if not hasattr(this_fun, '__module__'):
-                                continue
-                            if not isinstance(this_fun.__module__, basestring):
-                                continue
-                            if (this_fun.__module__.split('.')[0]
-                                    not in DOCMODULES):
-                                continue
-
-                            # get shortened module name
-                            fun_name_short = fun_name.split('.')[-1]
-                            module_short = get_short_module_name(
-                                this_fun.__module__, fun_name_short)
-                            cobj = {'name': fun_name_short,
-                                    'module': this_fun.__module__,
-                                    'module_short': module_short,
-                                    'obj_type': 'function'}
-                            example_code_obj[fun_name] = cobj
-
-                fid.close()
-                if len(example_code_obj) > 0:
-                    # save the dictionary, so we can later add hyperlinks
-                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
-                    with open(codeobj_fname, 'wb') as fid:
-                        cPickle.dump(example_code_obj, fid,
-                                     cPickle.HIGHEST_PROTOCOL)
-                    fid.close()
-                if '__doc__' in my_globals:
-                    # The __doc__ is often printed in the example, we
-                    # don't wish to echo it
-                    my_stdout = my_stdout.replace(my_globals['__doc__'],
-                                                  '')
-                my_stdout = my_stdout.strip()
-                if my_stdout:
-                    output_lines = my_stdout.split('\n')
-                    if len(output_lines) > MAX_NB_LINES_STDOUT:
-                        output_lines = output_lines[:MAX_NB_LINES_STDOUT]
-                        output_lines.append('...')
-                    stdout = ('**Script output**::\n\n  %s\n\n'
-                              % ('\n  '.join(output_lines)))
-                open(stdout_path, 'w').write(stdout)
-                open(time_path, 'w').write('%f' % time_elapsed)
-                os.chdir(cwd)
-
-                # In order to save every figure we have two solutions:
-                # * iterate from 1 to infinity and call plt.fignum_exists(n)
-                #   (this requires the figures to be numbered
-                #    incrementally: 1, 2, 3 and not 1, 2, 5)
-                # * iterate over [fig_mngr.num for fig_mngr in
-                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
-                last_fig_num = 0
-                for fig_num in (fig_mngr.num for fig_mngr in
-                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
-                    # Set the fig_num figure as the current figure as we can't
-                    # save a figure that's not the current figure.
-                    plt.figure(fig_num)
-                    # hack to keep black bg
-                    facecolor = plt.gcf().get_facecolor()
-                    if facecolor == (0.0, 0.0, 0.0, 1.0):
-                        plt.savefig(image_path % fig_num, facecolor='black')
-                    else:
-                        plt.savefig(image_path % fig_num)
-
-                    # make sure the image is not too large
-                    scale_image(image_path % fig_num, 850)
-                    figure_list.append(image_fname % fig_num)
-                    last_fig_num = fig_num
-
-                e = mlab.get_engine()
-                for scene in e.scenes:
-                    last_fig_num += 1
-                    mlab.savefig(image_path % last_fig_num)
-                    # make sure the image is not too large
-                    scale_image(image_path % last_fig_num, 850)
-                    figure_list.append(image_fname % last_fig_num)
-                    mlab.close(scene)
-
-            except:
-                print 80 * '_'
-                print '%s is not compiling:' % fname
-                traceback.print_exc()
-                print 80 * '_'
-            finally:
-                os.chdir(cwd)
-                sys.stdout = orig_stdout
-
-            print " - time elapsed : %.2g sec" % time_elapsed
-        else:
-            figure_list = [f[len(image_dir):]
-                           for f in glob.glob(image_path % '[1-9]')]
-
-        # generate thumb file
-        this_template = plot_rst_template
-        if os.path.exists(first_image_file):
-            make_thumbnail(first_image_file, thumb_file, 180, 120)
-
-    if not os.path.exists(thumb_file):
-        # use the default thumbnail
-        make_thumbnail('source/_images/mne_helmet.png', thumb_file, 180, 120)
-
-    docstring, short_desc, end_row = extract_docstring(example_file)
-
-    # Depending on whether we have one or more figures, we're using a
-    # horizontal list or a single rst call to 'image'.
-    if len(figure_list) == 1:
-        figure_name = figure_list[0]
-        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
-    else:
-        image_list = HLIST_HEADER
-        for figure_name in figure_list:
-            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
-
-    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
-    f.write(this_template % locals())
-    f.flush()
-
-
-def embed_code_links(app, exception):
-    """Embed hyperlinks to documentation into example code"""
-    if exception is not None:
-        return
-    print 'Embedding documentation hyperlinks in examples..'
-
-    # Add resolvers for the packages for which we want to show links
-    doc_resolvers = {}
-    doc_resolvers['mne'] = SphinxDocLinkResolver(app.builder.outdir,
-                                                 relative=True)
-
-    doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
-        'http://matplotlib.org')
-
-    doc_resolvers['numpy'] = SphinxDocLinkResolver(
-        'http://docs.scipy.org/doc/numpy-1.6.0')
-
-    doc_resolvers['scipy'] = SphinxDocLinkResolver(
-        'http://docs.scipy.org/doc/scipy-0.11.0/reference')
-
-    doc_resolvers['mayavi'] = SphinxDocLinkResolver(
-        'http://docs.enthought.com/mayavi/mayavi',
-        extra_modules_test=['mayavi.mlab'])
-
-    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
-    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
-                                                    'auto_examples'))
-    # patterns for replacement
-    link_pattern = '<a href="%s">%s</a>'
-    orig_pattern = '<span class="n">%s</span>'
-    period = '<span class="o">.</span>'
-
-    for dirpath, _, filenames in os.walk(html_example_dir):
-        for fname in filenames:
-            print '\tprocessing: %s' % fname
-            full_fname = os.path.join(html_example_dir, dirpath, fname)
-            subpath = dirpath[len(html_example_dir) + 1:]
-            pickle_fname = os.path.join(example_dir, subpath,
-                                        fname[:-5] + '_codeobj.pickle')
-
-            if os.path.exists(pickle_fname):
-                # we have a pickle file with the objects to embed links for
-                with open(pickle_fname, 'rb') as fid:
-                    example_code_obj = cPickle.load(fid)
-                fid.close()
-                str_repl = {}
-                # generate replacement strings with the links
-                for name, cobj in example_code_obj.iteritems():
-                    this_module = cobj['module'].split('.')[0]
-
-                    if this_module not in doc_resolvers:
-                        continue
-
-                    link = doc_resolvers[this_module].resolve(cobj,
-                                                              full_fname)
-                    if link is not None:
-                        parts = name.split('.')
-                        name_html = orig_pattern % parts[0]
-                        for part in parts[1:]:
-                            name_html += period + orig_pattern % part
-                        str_repl[name_html] = link_pattern % (link, name_html)
-                # do the replacement in the html file
-                if len(str_repl) > 0:
-                    with open(full_fname, 'rt') as fid:
-                        lines_in = fid.readlines()
-                    fid.close()
-                    with open(full_fname, 'wt') as fid:
-                        for line in lines_in:
-                            for name, link in str_repl.iteritems():
-                                line = line.replace(name.encode('utf-8'),
-                                                    link.encode('utf-8'))
-                            fid.write(line)
-                    fid.close()
-
-    print '[done]'
-
-
-def setup(app):
-    app.connect('builder-inited', generate_example_rst)
-    app.add_config_value('plot_gallery', True, 'html')
-
-    # embed links after build is finished
-    app.connect('build-finished', embed_code_links)
-
-    # Sphinx hack: sphinx copies generated images to the build directory
-    #  each time the docs are made.  If the desired image name already
-    #  exists, it appends a digit to prevent overwrites.  The problem is,
-    #  the directory is never cleared.  This means that each time you build
-    #  the docs, the number of images in the directory grows.
-    #
-    # This question has been asked on the sphinx development list, but there
-    #  was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
-    #
-    # The following is a hack that prevents this behavior by clearing the
-    #  image build directory each time the docs are built.  If sphinx
-    #  changes their layout between versions, this will not work (though
-    #  it should probably not cause a crash).  Tested successfully
-    #  on Sphinx 1.0.7
-    build_image_dir = 'build/html/_images'
-    if os.path.exists(build_image_dir):
-        filelist = os.listdir(build_image_dir)
-        for filename in filelist:
-            if filename.endswith('png'):
-                os.remove(os.path.join(build_image_dir, filename))
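One piece of the deleted gen_rst.py worth calling out is the Tee object: it
let the generator echo an example's stdout to the console while also capturing
it for the "Script output" block of the rendered page. A self-contained sketch
of the same pattern (written for Python 3 here for brevity):

    import sys
    from io import StringIO

    class Tee(object):
        def __init__(self, file1, file2):
            self.file1, self.file2 = file1, file2

        def write(self, data):
            # mirror every write to both destinations
            self.file1.write(data)
            self.file2.write(data)

        def flush(self):
            self.file1.flush()
            self.file2.flush()

    buf = StringIO()
    sys.stdout = Tee(sys.__stdout__, buf)
    print('captured and echoed')
    sys.stdout = sys.__stdout__
    assert 'captured and echoed' in buf.getvalue()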
diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpy_ext/docscrape.py
index ad5998c..84caa06 100644
--- a/doc/sphinxext/numpy_ext/docscrape.py
+++ b/doc/sphinxext/numpy_ext/docscrape.py
@@ -6,8 +6,13 @@ import inspect
 import textwrap
 import re
 import pydoc
-from StringIO import StringIO
 from warnings import warn
+# Try Python 2 first, otherwise load from Python 3
+try:
+    from StringIO import StringIO
+except:
+    from io import StringIO
+
 
 class Reader(object):
     """A line-based string reader.
@@ -21,10 +26,10 @@ class Reader(object):
            String with lines separated by '\n'.
 
         """
-        if isinstance(data,list):
+        if isinstance(data, list):
             self._str = data
         else:
-            self._str = data.split('\n') # store string as list of lines
+            self._str = data.split('\n')  # store string as list of lines
 
         self.reset()
 
@@ -32,7 +37,7 @@ class Reader(object):
         return self._str[n]
 
     def reset(self):
-        self._l = 0 # current line nr
+        self._l = 0  # current line nr
 
     def read(self):
         if not self.eof():
@@ -59,11 +64,12 @@ class Reader(object):
                 return self[start:self._l]
             self._l += 1
             if self.eof():
-                return self[start:self._l+1]
+                return self[start:self._l + 1]
         return []
 
     def read_to_next_empty_line(self):
         self.seek_next_non_empty_line()
+
         def is_empty(line):
             return not line.strip()
         return self.read_to_condition(is_empty)
@@ -73,7 +79,7 @@ class Reader(object):
             return (line.strip() and (len(line.lstrip()) == len(line)))
         return self.read_to_condition(is_unindented)
 
-    def peek(self,n=0):
+    def peek(self, n=0):
         if self._l + n < len(self._str):
             return self[self._l + n]
         else:
@@ -109,11 +115,11 @@ class NumpyDocString(object):
 
         self._parse()
 
-    def __getitem__(self,key):
+    def __getitem__(self, key):
         return self._parsed_data[key]
 
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
+    def __setitem__(self, key, val):
+        if key not in self._parsed_data:
             warn("Unknown section %s" % key)
         else:
             self._parsed_data[key] = val
@@ -129,25 +135,27 @@ class NumpyDocString(object):
         if l1.startswith('.. index::'):
             return True
 
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+        l2 = self._doc.peek(1).strip()   # ---------- or ==========
+        return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
 
-    def _strip(self,doc):
+    def _strip(self, doc):
         i = 0
         j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
+        for i, line in enumerate(doc):
+            if line.strip():
+                break
 
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
+        for j, line in enumerate(doc[::-1]):
+            if line.strip():
+                break
 
-        return doc[i:len(doc)-j]
+        return doc[i:len(doc) - j]
 
     def _read_to_next_section(self):
         section = self._doc.read_to_next_empty_line()
 
         while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
+            if not self._doc.peek(-1).strip():  # previous line was empty
                 section += ['']
 
             section += self._doc.read_to_next_empty_line()
@@ -159,14 +167,14 @@ class NumpyDocString(object):
             data = self._read_to_next_section()
             name = data[0].strip()
 
-            if name.startswith('..'): # index section
+            if name.startswith('..'):  # index section
                 yield name, data[1:]
             elif len(data) < 2:
                 yield StopIteration
             else:
                 yield name, self._strip(data[2:])
 
-    def _parse_param_list(self,content):
+    def _parse_param_list(self, content):
         r = Reader(content)
         params = []
         while not r.eof():
@@ -179,13 +187,13 @@ class NumpyDocString(object):
             desc = r.read_to_next_unindented_line()
             desc = dedent_lines(desc)
 
-            params.append((arg_name,arg_type,desc))
+            params.append((arg_name, arg_type, desc))
 
         return params
 
-
     _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                            r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+
     def _parse_see_also(self, content):
         """
         func_name : Descriptive text
@@ -218,7 +226,8 @@ class NumpyDocString(object):
         rest = []
 
         for line in content:
-            if not line.strip(): continue
+            if not line.strip():
+                continue
 
             m = self._name_rgx.match(line)
             if m and line[m.end():].strip().startswith(':'):
@@ -280,9 +289,10 @@ class NumpyDocString(object):
         self._doc.reset()
         self._parse_summary()
 
-        for (section,content) in self._read_sections():
+        for (section, content) in self._read_sections():
             if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
+                section = ' '.join([s.capitalize()
+                                    for s in section.split(' ')])
             if section in ('Parameters', 'Attributes', 'Methods',
                            'Returns', 'Raises', 'Warns'):
                 self[section] = self._parse_param_list(content)
@@ -296,17 +306,17 @@ class NumpyDocString(object):
     # string conversion routines
 
     def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
+        return [name, len(name) * symbol]
 
     def _str_indent(self, doc, indent=4):
         out = []
         for line in doc:
-            out += [' '*indent + line]
+            out += [' ' * indent + line]
         return out
 
     def _str_signature(self):
         if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
+            return [self['Signature'].replace('*', '\*')] + ['']
         else:
             return ['']
 
@@ -326,7 +336,7 @@ class NumpyDocString(object):
         out = []
         if self[name]:
             out += self._str_header(name)
-            for param,param_type,desc in self[name]:
+            for param, param_type, desc in self[name]:
                 out += ['%s : %s' % (param, param_type)]
                 out += self._str_indent(desc)
             out += ['']
@@ -341,7 +351,8 @@ class NumpyDocString(object):
         return out
 
     def _str_see_also(self, func_role):
-        if not self['See Also']: return []
+        if not self['See Also']:
+            return []
         out = []
         out += self._str_header("See Also")
         last_had_desc = True
@@ -368,7 +379,7 @@ class NumpyDocString(object):
     def _str_index(self):
         idx = self['index']
         out = []
-        out += ['.. index:: %s' % idx.get('default','')]
+        out += ['.. index:: %s' % idx.get('default', '')]
         for section, references in idx.iteritems():
             if section == 'default':
                 continue
@@ -380,11 +391,11 @@ class NumpyDocString(object):
         out += self._str_signature()
         out += self._str_summary()
         out += self._str_extended_summary()
-        for param_list in ('Parameters','Returns','Raises'):
+        for param_list in ('Parameters', 'Returns', 'Raises'):
             out += self._str_param_list(param_list)
         out += self._str_section('Warnings')
         out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
+        for s in ('Notes', 'References', 'Examples'):
             out += self._str_section(s)
         for param_list in ('Attributes', 'Methods'):
             out += self._str_param_list(param_list)
@@ -392,25 +403,27 @@ class NumpyDocString(object):
         return '\n'.join(out)
 
 
-def indent(str,indent=4):
-    indent_str = ' '*indent
+def indent(str, indent=4):
+    indent_str = ' ' * indent
     if str is None:
         return indent_str
     lines = str.split('\n')
     return '\n'.join(indent_str + l for l in lines)
 
+
 def dedent_lines(lines):
     """Deindent a list of lines maximally"""
     return textwrap.dedent("\n".join(lines)).split("\n")
 
+
 def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
+    return text + '\n' + style * len(text) + '\n'
 
 
 class FunctionDoc(NumpyDocString):
     def __init__(self, func, role='func', doc=None, config={}):
         self._f = func
-        self._role = role # e.g. "func" or "meth"
+        self._role = role  # e.g. "func" or "meth"
 
         if doc is None:
             if func is None:
@@ -424,9 +437,9 @@ class FunctionDoc(NumpyDocString):
                 # try to read signature
                 argspec = inspect.getargspec(func)
                 argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
+                argspec = argspec.replace('*', '\*')
                 signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
+            except TypeError as e:
                 signature = '%s()' % func_name
             self['Signature'] = signature
 
@@ -449,8 +462,8 @@ class FunctionDoc(NumpyDocString):
 
         if self._role:
             if not roles.has_key(self._role):
-                print "Warning: invalid role %s" % self._role
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                print("Warning: invalid role %s" % self._role)
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role, ''),
                                              func_name)
 
         out += super(FunctionDoc, self).__str__(func_role=self._role)
@@ -459,7 +472,7 @@ class FunctionDoc(NumpyDocString):
 
 class ClassDoc(NumpyDocString):
     def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
-                 config={}):
+                 config=None):
         if not inspect.isclass(cls) and cls is not None:
             raise ValueError("Expected a class or None, but got %r" % cls)
         self._cls = cls
@@ -475,7 +488,7 @@ class ClassDoc(NumpyDocString):
 
         NumpyDocString.__init__(self, doc)
 
-        if config.get('show_class_members', True):
+        if config is not None and config.get('show_class_members', True):
             if not self['Methods']:
                 self['Methods'] = [(name, '', '')
                                    for name in sorted(self.methods)]
@@ -487,12 +500,12 @@ class ClassDoc(NumpyDocString):
     def methods(self):
         if self._cls is None:
             return []
-        return [name for name,func in inspect.getmembers(self._cls)
+        return [name for name, func in inspect.getmembers(self._cls)
                 if not name.startswith('_') and callable(func)]
 
     @property
     def properties(self):
         if self._cls is None:
             return []
-        return [name for name,func in inspect.getmembers(self._cls)
+        return [name for name, func in inspect.getmembers(self._cls)
                 if not name.startswith('_') and func is None]
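
The hunks above replace the mutable default ``config={}`` with ``config=None`` plus an explicit guard. In Python a default argument is evaluated once, when the ``def`` statement runs, so a dict default is a single object shared by every call that omits the argument. A minimal sketch of the pitfall and of the fix applied here, using hypothetical function names::

    # Buggy: the default dict is created once and shared across calls.
    def parse_shared(docstring, config={}):
        config.setdefault('use_plots', False)
        return config  # same dict object every time config is omitted

    # Fixed, as in the hunks above: default to None, build the dict inside.
    def parse_fixed(docstring, config=None):
        config = {} if config is None else config  # fresh dict per call
        config.setdefault('use_plots', False)
        return config
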
diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpy_ext/docscrape_sphinx.py
index 9f4350d..ca28300 100644
--- a/doc/sphinxext/numpy_ext/docscrape_sphinx.py
+++ b/doc/sphinxext/numpy_ext/docscrape_sphinx.py
@@ -1,9 +1,15 @@
-import re, inspect, textwrap, pydoc
-import sphinx
-from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+import re
+import inspect
+import textwrap
+import pydoc
+from .docscrape import NumpyDocString
+from .docscrape import FunctionDoc
+from .docscrape import ClassDoc
+
 
 class SphinxDocString(NumpyDocString):
-    def __init__(self, docstring, config={}):
+    def __init__(self, docstring, config=None):
+        config = {} if config is None else config
         self.use_plots = config.get('use_plots', False)
         NumpyDocString.__init__(self, docstring, config=config)
 
@@ -17,7 +23,7 @@ class SphinxDocString(NumpyDocString):
     def _str_indent(self, doc, indent=4):
         out = []
         for line in doc:
-            out += [' '*indent + line]
+            out += [' ' * indent + line]
         return out
 
     def _str_signature(self):
@@ -38,11 +44,11 @@ class SphinxDocString(NumpyDocString):
         if self[name]:
             out += self._str_field_list(name)
             out += ['']
-            for param,param_type,desc in self[name]:
+            for param, param_type, desc in self[name]:
                 out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                           param_type)])
                 out += ['']
-                out += self._str_indent(desc,8)
+                out += self._str_indent(desc, 8)
                 out += ['']
         return out
 
@@ -78,13 +84,16 @@ class SphinxDocString(NumpyDocString):
                     others.append((param, param_type, desc))
 
             if autosum:
-                out += ['.. autosummary::', '   :toctree:', '']
+                # GAEL: Toctree commented out below because it creates
+                # hundreds of sphinx warnings
+                # out += ['.. autosummary::', '   :toctree:', '']
+                out += ['.. autosummary::', '']
                 out += autosum
 
             if others:
                 maxlen_0 = max([len(x[0]) for x in others])
                 maxlen_1 = max([len(x[1]) for x in others])
-                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
+                hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
                 fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                 n_indent = maxlen_0 + maxlen_1 + 4
                 out += [hdr]
@@ -126,7 +135,7 @@ class SphinxDocString(NumpyDocString):
         if len(idx) == 0:
             return out
 
-        out += ['.. index:: %s' % idx.get('default','')]
+        out += ['.. index:: %s' % idx.get('default', '')]
         for section, references in idx.iteritems():
             if section == 'default':
                 continue
@@ -146,10 +155,11 @@ class SphinxDocString(NumpyDocString):
             out += ['']
             # Latex collects all references to a separate bibliography,
             # so we need to insert links to it
+            import sphinx  # local import to avoid test dependency
             if sphinx.__version__ >= "0.6":
-                out += ['.. only:: latex','']
+                out += ['.. only:: latex', '']
             else:
-                out += ['.. latexonly::','']
+                out += ['.. latexonly::', '']
             items = []
             for line in self['References']:
                 m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
@@ -178,33 +188,37 @@ class SphinxDocString(NumpyDocString):
         out += self._str_index() + ['']
         out += self._str_summary()
         out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Returns', 'Raises'):
+        for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
             out += self._str_param_list(param_list)
         out += self._str_warnings()
         out += self._str_see_also(func_role)
         out += self._str_section('Notes')
         out += self._str_references()
         out += self._str_examples()
-        for param_list in ('Attributes', 'Methods'):
+        for param_list in ('Methods',):
             out += self._str_member_list(param_list)
-        out = self._str_indent(out,indent)
+        out = self._str_indent(out, indent)
         return '\n'.join(out)
 
+
 class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
     def __init__(self, obj, doc=None, config={}):
         self.use_plots = config.get('use_plots', False)
         FunctionDoc.__init__(self, obj, doc=doc, config=config)
 
+
 class SphinxClassDoc(SphinxDocString, ClassDoc):
     def __init__(self, obj, doc=None, func_doc=None, config={}):
         self.use_plots = config.get('use_plots', False)
         ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
 
+
 class SphinxObjDoc(SphinxDocString):
-    def __init__(self, obj, doc=None, config={}):
+    def __init__(self, obj, doc=None, config=None):
         self._f = obj
         SphinxDocString.__init__(self, doc, config=config)
 
+
 def get_doc_object(obj, what=None, doc=None, config={}):
     if what is None:
         if inspect.isclass(obj):
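
The import rewrite at the top of this file (``from docscrape import ...`` becoming ``from .docscrape import ...``) is what lets the extension be imported as a package: Python 3 removed implicit relative imports, so the bare form only resolves when the module's own directory happens to be on ``sys.path``. A minimal sketch of the package-safe form, with a hypothetical package name::

    # numpy_ext/docscrape_sphinx.py
    # Explicit relative import: available since Python 2.5 and the only
    # relative form accepted on Python 3.
    from .docscrape import NumpyDocString
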
diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpy_ext/numpydoc.py
index 630a432..6ff03e0 100644
--- a/doc/sphinxext/numpy_ext/numpydoc.py
+++ b/doc/sphinxext/numpy_ext/numpydoc.py
@@ -10,17 +10,24 @@ It will:
 - Convert Parameters etc. sections to field lists.
 - Convert See Also section to a See also entry.
 - Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
+- Extract the signature from the docstring, if it can't be determined
+  otherwise.
 
 .. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
 
 """
 
-import os, re, pydoc
-from docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
+from __future__ import unicode_literals
+
+import sys  # Only needed to check Python version
+import os
+import re
+import pydoc
+from .docscrape_sphinx import get_doc_object
+from .docscrape_sphinx import SphinxDocString
 import inspect
 
+
 def mangle_docstrings(app, what, name, obj, options, lines,
                       reference_offset=[0]):
 
@@ -29,17 +36,20 @@ def mangle_docstrings(app, what, name, obj, options, lines,
 
     if what == 'module':
         # Strip top title
-        title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
-                              re.I|re.S)
-        lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
+        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+                              re.I | re.S)
+        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
     else:
-        doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
-        lines[:] = unicode(doc).split(u"\n")
+        doc = get_doc_object(obj, what, "\n".join(lines), config=cfg)
+        if sys.version_info[0] < 3:
+            lines[:] = unicode(doc).splitlines()
+        else:
+            lines[:] = str(doc).splitlines()
 
     if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
            obj.__name__:
         if hasattr(obj, '__module__'):
-            v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
+            v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
         else:
             v = dict(full_name=obj.__name__)
         lines += [u'', u'.. htmlonly::', '']
@@ -50,7 +60,7 @@ def mangle_docstrings(app, what, name, obj, options, lines,
     references = []
     for line in lines:
         line = line.strip()
-        m = re.match(ur'^.. \[([a-z0-9_.-]+)\]', line, re.I)
+        m = re.match(r'^.. \[([a-z0-9_.-]+)\]', line, re.I)
         if m:
             references.append(m.group(1))
 
@@ -59,8 +69,8 @@ def mangle_docstrings(app, what, name, obj, options, lines,
     if references:
         for i, line in enumerate(lines):
             for r in references:
-                if re.match(ur'^\d+$', r):
-                    new_r = u"R%d" % (reference_offset[0] + int(r))
+                if re.match(r'^\d+$', r):
+                    new_r = "R%d" % (reference_offset[0] + int(r))
                 else:
                     new_r = u"%s%d" % (r, reference_offset[0])
                 lines[i] = lines[i].replace(u'[%s]_' % r,
@@ -70,27 +80,36 @@ def mangle_docstrings(app, what, name, obj, options, lines,
 
     reference_offset[0] += len(references)
 
-def mangle_signature(app, what, name, obj, options, sig, retann):
+
+def mangle_signature(app, what, name, obj,
+                     options, sig, retann):
     # Do not try to inspect classes that don't define `__init__`
     if (inspect.isclass(obj) and
         (not hasattr(obj, '__init__') or
         'initializes x; see ' in pydoc.getdoc(obj.__init__))):
         return '', ''
 
-    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
-    if not hasattr(obj, '__doc__'): return
+    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')):
+        return
+    if not hasattr(obj, '__doc__'):
+        return
 
     doc = SphinxDocString(pydoc.getdoc(obj))
     if doc['Signature']:
-        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
-        return sig, u''
+        sig = re.sub("^[^(]*", "", doc['Signature'])
+        return sig, ''
+
 
 def setup(app, get_doc_object_=get_doc_object):
     global get_doc_object
     get_doc_object = get_doc_object_
 
-    app.connect('autodoc-process-docstring', mangle_docstrings)
-    app.connect('autodoc-process-signature', mangle_signature)
+    if sys.version_info[0] < 3:
+        app.connect(b'autodoc-process-docstring', mangle_docstrings)
+        app.connect(b'autodoc-process-signature', mangle_signature)
+    else:
+        app.connect('autodoc-process-docstring', mangle_docstrings)
+        app.connect('autodoc-process-signature', mangle_signature)
     app.add_config_value('numpydoc_edit_link', None, False)
     app.add_config_value('numpydoc_use_plots', None, False)
     app.add_config_value('numpydoc_show_class_members', True, True)
@@ -99,13 +118,18 @@ def setup(app, get_doc_object_=get_doc_object):
     app.add_domain(NumpyPythonDomain)
     app.add_domain(NumpyCDomain)
 
-#------------------------------------------------------------------------------
+#-----------------------------------------------------------------------------
 # Docstring-mangling domains
-#------------------------------------------------------------------------------
+#-----------------------------------------------------------------------------
+
+try:
+    import sphinx  # lazy to avoid test dependency
+except ImportError:
+    CDomain = PythonDomain = object
+else:
+    from sphinx.domains.c import CDomain
+    from sphinx.domains.python import PythonDomain
 
-from docutils.statemachine import ViewList
-from sphinx.domains.c import CDomain
-from sphinx.domains.python import PythonDomain
 
 class ManglingDomainBase(object):
     directive_mangling_map = {}
@@ -119,6 +143,7 @@ class ManglingDomainBase(object):
             self.directives[name] = wrap_mangling_directive(
                 self.directives[name], objtype)
 
+
 class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
     name = 'np'
     directive_mangling_map = {
@@ -131,6 +156,7 @@ class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
         'attribute': 'attribute',
     }
 
+
 class NumpyCDomain(ManglingDomainBase, CDomain):
     name = 'np-c'
     directive_mangling_map = {
@@ -141,6 +167,7 @@ class NumpyCDomain(ManglingDomainBase, CDomain):
         'var': 'object',
     }
 
+
 def wrap_mangling_directive(base_directive, objtype):
     class directive(base_directive):
         def run(self):
@@ -156,6 +183,8 @@ def wrap_mangling_directive(base_directive, objtype):
 
             lines = list(self.content)
             mangle_docstrings(env.app, objtype, name, None, None, lines)
+            # local import to avoid testing dependency
+            from docutils.statemachine import ViewList
             self.content = ViewList(lines, self.content.parent)
 
             return base_directive.run(self)
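
With ``unicode_literals`` now in effect, every bare literal in this module is text, which explains both the ``unicode(doc)``/``str(doc)`` branch and the version-gated ``app.connect`` calls above: some Sphinx releases on Python 2 insisted on native byte strings for event names. The same guard can be written once; a sketch using a hypothetical helper, not part of the actual patch::

    import sys

    def connect_compat(app, event, handler):
        # Sphinx on Python 2 expects a native (byte) string event name;
        # on Python 3 a text string is already the native type.
        if sys.version_info[0] < 3:
            event = event.encode('ascii')
        app.connect(event, handler)
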
diff --git a/doc/sphinxext/numpy_ext_old/docscrape.py b/doc/sphinxext/numpy_ext_old/docscrape.py
deleted file mode 100644
index fb4b544..0000000
--- a/doc/sphinxext/numpy_ext_old/docscrape.py
+++ /dev/null
@@ -1,490 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-4
-class Reader(object):
-    """A line-based string reader.
-
-    """
-    def __init__(self, data):
-        """
-        Parameters
-        ----------
-        data : str
-           String with lines separated by '\n'.
-
-        """
-        if isinstance(data,list):
-            self._str = data
-        else:
-            self._str = data.split('\n') # store string as list of lines
-
-        self.reset()
-
-    def __getitem__(self, n):
-        return self._str[n]
-
-    def reset(self):
-        self._l = 0 # current line nr
-
-    def read(self):
-        if not self.eof():
-            out = self[self._l]
-            self._l += 1
-            return out
-        else:
-            return ''
-
-    def seek_next_non_empty_line(self):
-        for l in self[self._l:]:
-            if l.strip():
-                break
-            else:
-                self._l += 1
-
-    def eof(self):
-        return self._l >= len(self._str)
-
-    def read_to_condition(self, condition_func):
-        start = self._l
-        for line in self[start:]:
-            if condition_func(line):
-                return self[start:self._l]
-            self._l += 1
-            if self.eof():
-                return self[start:self._l+1]
-        return []
-
-    def read_to_next_empty_line(self):
-        self.seek_next_non_empty_line()
-        def is_empty(line):
-            return not line.strip()
-        return self.read_to_condition(is_empty)
-
-    def read_to_next_unindented_line(self):
-        def is_unindented(line):
-            return (line.strip() and (len(line.lstrip()) == len(line)))
-        return self.read_to_condition(is_unindented)
-
-    def peek(self,n=0):
-        if self._l + n < len(self._str):
-            return self[self._l + n]
-        else:
-            return ''
-
-    def is_empty(self):
-        return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
-    def __init__(self,docstring):
-        docstring = textwrap.dedent(docstring).split('\n')
-
-        self._doc = Reader(docstring)
-        self._parsed_data = {
-            'Signature': '',
-            'Summary': [''],
-            'Extended Summary': [],
-            'Parameters': [],
-            'Returns': [],
-            'Raises': [],
-            'Warns': [],
-            'Other Parameters': [],
-            'Attributes': [],
-            'Methods': [],
-            'See Also': [],
-            'Notes': [],
-            'Warnings': [],
-            'References': '',
-            'Examples': '',
-            'index': {}
-            }
-
-        self._parse()
-
-    def __getitem__(self,key):
-        return self._parsed_data[key]
-
-    def __setitem__(self,key,val):
-        if not self._parsed_data.has_key(key):
-            warn("Unknown section %s" % key)
-        else:
-            self._parsed_data[key] = val
-
-    def _is_at_section(self):
-        self._doc.seek_next_non_empty_line()
-
-        if self._doc.eof():
-            return False
-
-        l1 = self._doc.peek().strip()  # e.g. Parameters
-
-        if l1.startswith('.. index::'):
-            return True
-
-        l2 = self._doc.peek(1).strip() #    ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
-    def _strip(self,doc):
-        i = 0
-        j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
-
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
-
-        return doc[i:len(doc)-j]
-
-    def _read_to_next_section(self):
-        section = self._doc.read_to_next_empty_line()
-
-        while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
-
-            section += self._doc.read_to_next_empty_line()
-
-        return section
-
-    def _read_sections(self):
-        while not self._doc.eof():
-            data = self._read_to_next_section()
-            name = data[0].strip()
-
-            if name.startswith('..'): # index section
-                yield name, data[1:]
-            elif len(data) < 2:
-                yield StopIteration
-            else:
-                yield name, self._strip(data[2:])
-
-    def _parse_param_list(self,content):
-        r = Reader(content)
-        params = []
-        while not r.eof():
-            header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
-            else:
-                arg_name, arg_type = header, ''
-
-            desc = r.read_to_next_unindented_line()
-            desc = dedent_lines(desc)
-
-            params.append((arg_name,arg_type,desc))
-
-        return params
-
-
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-    def _parse_see_also(self, content):
-        """
-        func_name : Descriptive text
-            continued text
-        another_func_name : Descriptive text
-        func_name1, func_name2, :meth:`func_name`, func_name3
-
-        """
-        items = []
-
-        def parse_item_name(text):
-            """Match ':role:`name`' or 'name'"""
-            m = self._name_rgx.match(text)
-            if m:
-                g = m.groups()
-                if g[1] is None:
-                    return g[3], None
-                else:
-                    return g[2], g[1]
-            raise ValueError("%s is not a item name" % text)
-
-        def push_item(name, rest):
-            if not name:
-                return
-            name, role = parse_item_name(name)
-            items.append((name, list(rest), role))
-            del rest[:]
-
-        current_func = None
-        rest = []
-
-        for line in content:
-            if not line.strip(): continue
-
-            m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
-                push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
-                if not rest[0]:
-                    rest = []
-            elif not line.startswith(' '):
-                push_item(current_func, rest)
-                current_func = None
-                if ',' in line:
-                    for func in line.split(','):
-                        push_item(func, [])
-                elif line.strip():
-                    current_func = line
-            elif current_func is not None:
-                rest.append(line.strip())
-        push_item(current_func, rest)
-        return items
-
-    def _parse_index(self, section, content):
-        """
-        .. index: default
-           :refguide: something, else, and more
-
-        """
-        def strip_each_in(lst):
-            return [s.strip() for s in lst]
-
-        out = {}
-        section = section.split('::')
-        if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
-        for line in content:
-            line = line.split(':')
-            if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
-        return out
-
-    def _parse_summary(self):
-        """Grab signature (if given) and summary"""
-        if self._is_at_section():
-            return
-
-        summary = self._doc.read_to_next_empty_line()
-        summary_str = " ".join([s.strip() for s in summary]).strip()
-        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
-            self['Signature'] = summary_str
-            if not self._is_at_section():
-                self['Summary'] = self._doc.read_to_next_empty_line()
-        else:
-            self['Summary'] = summary
-
-        if not self._is_at_section():
-            self['Extended Summary'] = self._read_to_next_section()
-
-    def _parse(self):
-        self._doc.reset()
-        self._parse_summary()
-
-        for (section,content) in self._read_sections():
-            if not section.startswith('..'):
-                section = ' '.join([s.capitalize() for s in section.split(' ')])
-            if section in ('Parameters', 'Attributes', 'Methods',
-                           'Returns', 'Raises', 'Warns'):
-                self[section] = self._parse_param_list(content)
-            elif section.startswith('.. index::'):
-                self['index'] = self._parse_index(section, content)
-            elif section == 'See Also':
-                self['See Also'] = self._parse_see_also(content)
-            else:
-                self[section] = content
-
-    # string conversion routines
-
-    def _str_header(self, name, symbol='-'):
-        return [name, len(name)*symbol]
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        if self['Signature']:
-            return [self['Signature'].replace('*','\*')] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        if self['Summary']:
-            return self['Summary'] + ['']
-        else:
-            return []
-
-    def _str_extended_summary(self):
-        if self['Extended Summary']:
-            return self['Extended Summary'] + ['']
-        else:
-            return []
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            for param,param_type,desc in self[name]:
-                out += ['%s : %s' % (param, param_type)]
-                out += self._str_indent(desc)
-            out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += self[name]
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        if not self['See Also']: return []
-        out = []
-        out += self._str_header("See Also")
-        last_had_desc = True
-        for func, desc, role in self['See Also']:
-            if role:
-                link = ':%s:`%s`' % (role, func)
-            elif func_role:
-                link = ':%s:`%s`' % (func_role, func)
-            else:
-                link = "`%s`_" % func
-            if desc or last_had_desc:
-                out += ['']
-                out += [link]
-            else:
-                out[-1] += ", %s" % link
-            if desc:
-                out += self._str_indent([' '.join(desc)])
-                last_had_desc = True
-            else:
-                last_had_desc = False
-        out += ['']
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            out += ['   :%s: %s' % (section, ', '.join(references))]
-        return out
-
-    def __str__(self, func_role=''):
-        out = []
-        out += self._str_signature()
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters','Returns','Raises'):
-            out += self._str_param_list(param_list)
-        out += self._str_section('Warnings')
-        out += self._str_see_also(func_role)
-        for s in ('Notes','References','Examples'):
-            out += self._str_section(s)
-        out += self._str_index()
-        return '\n'.join(out)
-
-
-def indent(str,indent=4):
-    indent_str = ' '*indent
-    if str is None:
-        return indent_str
-    lines = str.split('\n')
-    return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
-    """Deindent a list of lines maximally"""
-    return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
-    return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func'):
-        self._f = func
-        self._role = role # e.g. "func" or "meth"
-        try:
-            NumpyDocString.__init__(self,inspect.getdoc(func) or '')
-        except ValueError, e:
-            print '*'*78
-            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
-            print '*'*78
-            #print "Docstring follows:"
-            #print doclines
-            #print '='*78
-
-        if not self['Signature']:
-            func, func_name = self.get_func()
-            try:
-                # try to read signature
-                argspec = inspect.getargspec(func)
-                argspec = inspect.formatargspec(*argspec)
-                argspec = argspec.replace('*','\*')
-                signature = '%s%s' % (func_name, argspec)
-            except TypeError, e:
-                signature = '%s()' % func_name
-            self['Signature'] = signature
-
-    def get_func(self):
-        func_name = getattr(self._f, '__name__', self.__class__.__name__)
-        if inspect.isclass(self._f):
-            func = getattr(self._f, '__call__', self._f.__init__)
-        else:
-            func = self._f
-        return func, func_name
-
-    def __str__(self):
-        out = ''
-
-        func, func_name = self.get_func()
-        signature = self['Signature'].replace('*', '\*')
-
-        roles = {'func': 'function',
-                 'meth': 'method'}
-
-        if self._role:
-            if not roles.has_key(self._role):
-                print "Warning: invalid role %s" % self._role
-            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
-                                             func_name)
-
-        out += super(FunctionDoc, self).__str__(func_role=self._role)
-        return out
-
-
-class ClassDoc(NumpyDocString):
-    def __init__(self,cls,modulename='',func_doc=FunctionDoc):
-        if not inspect.isclass(cls):
-            raise ValueError("Initialise using a class. Got %r" % cls)
-        self._cls = cls
-
-        if modulename and not modulename.endswith('.'):
-            modulename += '.'
-        self._mod = modulename
-        self._name = cls.__name__
-        self._func_doc = func_doc
-
-        NumpyDocString.__init__(self, pydoc.getdoc(cls))
-
-    @property
-    def methods(self):
-        return [name for name,func in inspect.getmembers(self._cls)
-                if not name.startswith('_') and callable(func)]
-
-    def __str__(self):
-        out = ''
-        out += super(ClassDoc, self).__str__()
-        out += "\n\n"
-
-        #for m in self.methods:
-        #    print "Parsing `%s`" % m
-        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
-        #    out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
-
-        return out
diff --git a/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py b/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py
deleted file mode 100644
index d431ecd..0000000
--- a/doc/sphinxext/numpy_ext_old/docscrape_sphinx.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import re, inspect, textwrap, pydoc
-from docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-class SphinxDocString(NumpyDocString):
-    # string conversion routines
-    def _str_header(self, name, symbol='`'):
-        return ['.. rubric:: ' + name, '']
-
-    def _str_field_list(self, name):
-        return [':' + name + ':']
-
-    def _str_indent(self, doc, indent=4):
-        out = []
-        for line in doc:
-            out += [' '*indent + line]
-        return out
-
-    def _str_signature(self):
-        return ['']
-        if self['Signature']:
-            return ['``%s``' % self['Signature']] + ['']
-        else:
-            return ['']
-
-    def _str_summary(self):
-        return self['Summary'] + ['']
-
-    def _str_extended_summary(self):
-        return self['Extended Summary'] + ['']
-
-    def _str_param_list(self, name):
-        out = []
-        if self[name]:
-            out += self._str_field_list(name)
-            out += ['']
-            for param,param_type,desc in self[name]:
-                out += self._str_indent(['**%s** : %s' % (param.strip(),
-                                                          param_type)])
-                out += ['']
-                out += self._str_indent(desc,8)
-                out += ['']
-        return out
-
-    def _str_section(self, name):
-        out = []
-        if self[name]:
-            out += self._str_header(name)
-            out += ['']
-            content = textwrap.dedent("\n".join(self[name])).split("\n")
-            out += content
-            out += ['']
-        return out
-
-    def _str_see_also(self, func_role):
-        out = []
-        if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
-            out = ['.. seealso::', '']
-            out += self._str_indent(see_also[2:])
-        return out
-
-    def _str_warnings(self):
-        out = []
-        if self['Warnings']:
-            out = ['.. warning::', '']
-            out += self._str_indent(self['Warnings'])
-        return out
-
-    def _str_index(self):
-        idx = self['index']
-        out = []
-        if len(idx) == 0:
-            return out
-
-        out += ['.. index:: %s' % idx.get('default','')]
-        for section, references in idx.iteritems():
-            if section == 'default':
-                continue
-            elif section == 'refguide':
-                out += ['   single: %s' % (', '.join(references))]
-            else:
-                out += ['   %s: %s' % (section, ','.join(references))]
-        return out
-
-    def _str_references(self):
-        out = []
-        if self['References']:
-            out += self._str_header('References')
-            if isinstance(self['References'], str):
-                self['References'] = [self['References']]
-            out.extend(self['References'])
-            out += ['']
-        return out
-
-    def __str__(self, indent=0, func_role="obj"):
-        out = []
-        out += self._str_signature()
-        out += self._str_index() + ['']
-        out += self._str_summary()
-        out += self._str_extended_summary()
-        for param_list in ('Parameters', 'Attributes', 'Methods',
-                           'Returns','Raises'):
-            out += self._str_param_list(param_list)
-        out += self._str_warnings()
-        out += self._str_see_also(func_role)
-        out += self._str_section('Notes')
-        out += self._str_references()
-        out += self._str_section('Examples')
-        out = self._str_indent(out,indent)
-        return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
-    pass
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
-    pass
-
-def get_doc_object(obj, what=None):
-    if what is None:
-        if inspect.isclass(obj):
-            what = 'class'
-        elif inspect.ismodule(obj):
-            what = 'module'
-        elif callable(obj):
-            what = 'function'
-        else:
-            what = 'object'
-    if what == 'class':
-        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
-    elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, '')
-    else:
-        return SphinxDocString(pydoc.getdoc(obj))
diff --git a/doc/sphinxext/numpy_ext_old/numpydoc.py b/doc/sphinxext/numpy_ext_old/numpydoc.py
deleted file mode 100644
index 5e979ea..0000000
--- a/doc/sphinxext/numpy_ext_old/numpydoc.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""
-========
-numpydoc
-========
-
-Sphinx extension that handles docstrings in the Numpy standard format. [1]
-
-It will:
-
-- Convert Parameters etc. sections to field lists.
-- Convert See Also section to a See also entry.
-- Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
-
-.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
-
-"""
-
-import os, re, pydoc
-from docscrape_sphinx import get_doc_object, SphinxDocString
-import inspect
-
-def mangle_docstrings(app, what, name, obj, options, lines,
-                      reference_offset=[0]):
-    if what == 'module':
-        # Strip top title
-        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
-                              re.I|re.S)
-        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
-    else:
-        doc = get_doc_object(obj, what)
-        lines[:] = str(doc).split("\n")
-
-    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
-           obj.__name__:
-        v = dict(full_name=obj.__name__)
-        lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")
-
-    # replace reference numbers so that there are no duplicates
-    references = []
-    for l in lines:
-        l = l.strip()
-        if l.startswith('.. ['):
-            try:
-                references.append(int(l[len('.. ['):l.index(']')]))
-            except ValueError:
-                print "WARNING: invalid reference in %s docstring" % name
-
-    # Start renaming from the biggest number, otherwise we may
-    # overwrite references.
-    references.sort()
-    if references:
-        for i, line in enumerate(lines):
-            for r in references:
-                new_r = reference_offset[0] + r
-                lines[i] = lines[i].replace('[%d]_' % r,
-                                            '[%d]_' % new_r)
-                lines[i] = lines[i].replace('.. [%d]' % r,
-                                            '.. [%d]' % new_r)
-
-    reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj, options, sig, retann):
-    # Do not try to inspect classes that don't define `__init__`
-    if (inspect.isclass(obj) and
-        'initializes x; see ' in pydoc.getdoc(obj.__init__)):
-        return '', ''
-
-    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
-    if not hasattr(obj, '__doc__'): return
-
-    doc = SphinxDocString(pydoc.getdoc(obj))
-    if doc['Signature']:
-        sig = re.sub("^[^(]*", "", doc['Signature'])
-        return sig, ''
-
-def initialize(app):
-    try:
-        app.connect('autodoc-process-signature', mangle_signature)
-    except:
-        monkeypatch_sphinx_ext_autodoc()
-
-def setup(app, get_doc_object_=get_doc_object):
-    global get_doc_object
-    get_doc_object = get_doc_object_
-
-    app.connect('autodoc-process-docstring', mangle_docstrings)
-    app.connect('builder-inited', initialize)
-    app.add_config_value('numpydoc_edit_link', None, True)
-
-#------------------------------------------------------------------------------
-# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
-#------------------------------------------------------------------------------
-
-def monkeypatch_sphinx_ext_autodoc():
-    global _original_format_signature
-    import sphinx.ext.autodoc
-
-    if sphinx.ext.autodoc.format_signature is our_format_signature:
-        return
-
-    print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
-    _original_format_signature = sphinx.ext.autodoc.format_signature
-    sphinx.ext.autodoc.format_signature = our_format_signature
-
-def our_format_signature(what, obj):
-    r = mangle_signature(None, what, None, obj, None, None, None)
-    if r is not None:
-        return r[0]
-    else:
-        return _original_format_signature(what, obj)
diff --git a/doc/this_project.inc b/doc/this_project.inc
new file mode 100644
index 0000000..32595b4
--- /dev/null
+++ b/doc/this_project.inc
@@ -0,0 +1,4 @@
+.. mne-python
+.. _mne-python: http://mne-tools.github.io/mne-python-intro
+.. _`mne-python GitHub`: https://github.com/mne-tools/mne-python
+.. _`mne-python sample dataset`: ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE-sample-data-processed.tar.gz
diff --git a/doc/tutorials.rst b/doc/tutorials.rst
new file mode 100644
index 0000000..290e5b3
--- /dev/null
+++ b/doc/tutorials.rst
@@ -0,0 +1,84 @@
+.. _tutorials:
+
+Tutorials
+=========
+
+Getting started
+---------------
+
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_introduction.rst
+
+
+Working with MNE data structures
+--------------------------------
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_creating_data_structures.rst
+   auto_tutorials/plot_info.rst
+   auto_tutorials/plot_raw_objects.rst
+   auto_tutorials/plot_epochs_objects.rst
+   auto_tutorials/plot_epochs_to_data_frame.rst
+
+
+Preprocessing
+-------------
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_ica_from_raw.rst
+
+Source localization
+-------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_source_localization_basics.rst
+
+
+Statistics
+----------
+
+Sensor space
+^^^^^^^^^^^^
+
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_cluster_methods_tutorial.rst
+   auto_tutorials/plot_spatio_temporal_cluster_stats_sensor.rst
+   auto_tutorials/plot_cluster_1samp_test_time_frequency.rst
+   auto_tutorials/plot_cluster_stats_time_frequency.rst
+
+
+Source space
+^^^^^^^^^^^^
+
+.. toctree::
+   :maxdepth: 1
+
+   auto_tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.rst
+   auto_tutorials/plot_cluster_stats_spatio_temporal_2samp.rst
+   auto_tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.rst
+   auto_tutorials/plot_cluster_stats_spatio_temporal.rst
+
+Visualization and Reporting
+---------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorials/report.rst
+
+
+Command line tools
+------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorials/command_line.rst
diff --git a/doc/source/_images/plot_read_and_write_raw_data.png b/doc/tutorials/_images/plot_read_and_write_raw_data.png
similarity index 100%
rename from doc/source/_images/plot_read_and_write_raw_data.png
rename to doc/tutorials/_images/plot_read_and_write_raw_data.png
diff --git a/doc/source/_images/plot_read_epochs.png b/doc/tutorials/_images/plot_read_epochs.png
similarity index 100%
rename from doc/source/_images/plot_read_epochs.png
rename to doc/tutorials/_images/plot_read_epochs.png
diff --git a/doc/tutorials/_images/plot_time_frequency.png b/doc/tutorials/_images/plot_time_frequency.png
new file mode 100644
index 0000000..353749d
Binary files /dev/null and b/doc/tutorials/_images/plot_time_frequency.png differ
diff --git a/doc/source/command_line_tutorial.rst b/doc/tutorials/command_line.rst
similarity index 100%
rename from doc/source/command_line_tutorial.rst
rename to doc/tutorials/command_line.rst
diff --git a/doc/source/mne_report_tutorial.rst b/doc/tutorials/report.rst
similarity index 60%
rename from doc/source/mne_report_tutorial.rst
rename to doc/tutorials/report.rst
index f74c08d..2925663 100644
--- a/doc/source/mne_report_tutorial.rst
+++ b/doc/tutorials/report.rst
@@ -1,8 +1,8 @@
 .. _mne_report_tutorial:
 
-=================================================
-Tutorial: Getting started with MNE report command
-=================================================
+=======================================
+Getting started with MNE report command
+=======================================
 
 This quick start will show you how to run the `mne report` command on the
 sample data set provided with MNE.
@@ -43,12 +43,22 @@ if available)::
 To properly render `trans` and `covariance` files, add the measurement information::
 
     mne report --path MNE-sample-data/ --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ 
-        --subject sample --subjects_dir MNE-sample-data/subjects --verbose
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose
+
+To render whitened `evoked` files with baseline correction, add the noise covariance file::
+    
+    mne report --path MNE-sample-data/ --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ 
+        --cov MNE-sample-data/MEG/sample/sample_audvis-cov.fif --bmax 0 --subject sample \
+        --subjects-dir MNE-sample-data/subjects --verbose
 
 To generate the report in parallel::
 
     mne report --path MNE-sample-data/ --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \ 
-        --subject sample --subjects_dir MNE-sample-data/subjects --verbose --jobs 6
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose --jobs 6
+
+For help on all the available options, do::
+
+    mne report --help
 
 The Python interface
 --------------------
@@ -62,21 +72,32 @@ the required functions:
 Generate the report:
 
     >>> path = sample.data_path()
-    >>> report = Report()
+    >>> report = Report(verbose=True)
     Embedding : jquery-1.10.2.min.js
     Embedding : jquery-ui.min.js
     Embedding : bootstrap.min.js
     Embedding : jquery-ui.min.css
     Embedding : bootstrap.min.css
 
-Only include \*-eve.fif files in the report:
-
-    >>> report.parse_folder(data_path=path, pattern='*-eve.fif') # doctest: +SKIP
-    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif
-    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_eog-eve.fif
-    Rendering : .../MNE-sample-data/MEG/sample/ernoise_raw-eve.fif
-    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_raw-eve.fif
-    Rendering : .../MNE-sample-data/MEG/sample/sample_audvis_ecg-eve.fif
+Only include \*audvis_raw.fif and \*-eve.fif files in the report:
+
+    >>> report.parse_folder(data_path=path, pattern=['*audvis_raw.fif', '*-eve.fif']) # doctest: +SKIP
+    Iterating over 6 potential files (this may take some time)
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif
+    Opening raw data file /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw.fif...
+        Read a total of 3 projection items:
+            PCA-v1 (1 x 102)  idle
+            PCA-v2 (1 x 102)  idle
+            PCA-v3 (1 x 102)  idle
+    Current compensation grade : 0
+        Range : 25800 ... 192599 =     42.956 ...   320.670 secs
+    Ready.
+    Adding average EEG reference projection.
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_eog-eve.fif
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/ernoise_raw-eve.fif
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_raw-eve.fif
+    Rendering : /home/mainak/Desktop/projects/mne-python/examples/MNE-sample-data/MEG/sample/sample_audvis_ecg-eve.fif
 
 Save the report as an html, but do not open the html in a browser:
 
@@ -88,8 +109,8 @@ Custom plots can be added to the report. Let us first generate a custom plot:
 
     >>> from mne import read_evokeds
     >>> fname = path + '/MEG/sample/sample_audvis-ave.fif'
-    >>> evoked = read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) # doctest:+ELLIPSIS
-    Reading .../MNE-sample-data/MEG/sample/sample_audvis-ave.fif ...
+    >>> evoked = read_evokeds(fname, condition='Left Auditory', baseline=(None, 0), verbose=True) # doctest:+ELLIPSIS
+    Reading ...
         Read a total of 4 projection items:
             PCA-v1 (1 x 102) active
             PCA-v2 (1 x 102) active
@@ -99,19 +120,19 @@ Custom plots can be added to the report. Let us first generate a custom plot:
             t =    -199.80 ...     499.49 ms (Left Auditory)
             0 CTF compensation matrices available
             nave = 55 - aspect type = 100
-    Projections have already been applied. Doing nothing.
+    Projections have already been applied. Setting proj attribute to True.
     Applying baseline correction ... (mode: mean)
     >>> fig = evoked.plot() # doctest: +SKIP
 
 To add the custom plot to the report, do:
 
-    >>> report.add_section(fig, captions='Left Auditory', section='evoked') # doctest: +SKIP
+    >>> report.add_figs_to_section(fig, captions='Left Auditory', section='evoked') # doctest: +SKIP
     >>> report.save('report.html', overwrite=True) # doctest: +SKIP
     Rendering : Table of Contents...
 
 The MNE report command internally manages the sections so that plots belonging to the same section
 are rendered consecutively. Within a section, the plots appear in the order in which they were
-added using the `add_section` command. Each section is identified by a toggle button in the navigation 
+added using the `add_figs_to_section` command. Each section is identified by a toggle button in the navigation 
 bar of the report which can be used to show or hide the contents of the section.
 
 That's it!
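
Taken together, the calls renamed in this tutorial form a short script; a minimal sketch, assuming the sample dataset layout used above and the ``Report`` API as shown::

    from mne.datasets import sample
    from mne.report import Report

    path = sample.data_path()
    report = Report(verbose=True)
    # Gather raw and event files, matching the patterns shown above.
    report.parse_folder(data_path=path,
                        pattern=['*audvis_raw.fif', '*-eve.fif'])
    # Save without launching a browser.
    report.save('report.html', open_browser=False, overwrite=True)
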
diff --git a/doc/upload_html.sh b/doc/upload_html.sh
index ed75d88..bc85f85 100755
--- a/doc/upload_html.sh
+++ b/doc/upload_html.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 
 #scp -r build/html/* martinos-data:/web/html/mne/
-rsync -rltvz --delete --perms --chmod=g+w build/html/ martinos-data:/web/html/ext/mne/stable -essh
+rsync -rltvz --delete --perms --chmod=g+w _build/html/ martinos-data:/web/html/ext/mne/stable -essh
 ssh martinos-data "chgrp -R megweb /web/html/ext/mne/stable"
diff --git a/doc/utils/extract_config_doc.py b/doc/utils/extract_config_doc.py
deleted file mode 100755
index c101bcf..0000000
--- a/doc/utils/extract_config_doc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#! /usr/bin/env python
-
-"""
-This script will extract the documentation from the full_configbase.py
-module, reformat it somewhat, and write it as a reST document in
-$PYFIFF/doc/source.
-
-"""
-
-import os
-import re
-from fiff import __file__ as fifffile
-
-fiffpath = os.path.join(os.path.split(fifffile)[0], os.pardir)
-
-confid = open(os.path.join(
-    fiffpath, "data", "configfiles", "full_configbase.py"), "r")
-docfid = open(os.path.join(
-    fiffpath, "doc", "source", "config_doc.rst"), "w")
-
-docfid.write(".. _config_doc:\n\n")
-
-write = False
-space = False
-
-def flip(value):
-    if value:
-        return False
-    else:
-        return True
-
-sectionhead = re.compile("(<)([\w\s]+)(>)")
-
-def get_head(line):
-
-    m = sectionhead.search(line)
-    if m:
-        head = m.groups()[1]
-    else:
-        return ""
-
-    length = len(head)
-    head = "\n\n%s\n" % head
-    for i in range(length):
-        head = "%s-" % head
-    head = "%s\n\n" % head
-
-    return head
-
-for num, line in enumerate(confid):
-
-    if re.match("-+\n", line):
-        space = True
-        newline = ""
-        for i in range(len(line) - 1):
-            newline = "%s^" % newline
-        line = "%s\n" % newline
-    elif re.match("[ \t\n]+", line):
-        space = False
-    if line.startswith("#-"):
-        docfid.write(get_head(line))
-    else:
-        if line.startswith("\"\"\""):
-            write = flip(write)
-            lastflip = num
-    if space:
-        line = "%s\n" % line
-
-    if write and not num == lastflip:
-        docfid.write(line)
-
-confid.close()
-docfid.close()
diff --git a/doc/utils/lut2sphinxtbl.py b/doc/utils/lut2sphinxtbl.py
deleted file mode 100755
index 02051ed..0000000
--- a/doc/utils/lut2sphinxtbl.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#! /usr/bin/env python
-"""
-Usage: lut2sphinxtbl.py lutfile sphinxfile atlasname
-"""
-import os
-import re
-import sys
-import numpy as np
-
-if len(sys.argv) < 2:
-    print __doc__
-    sys.exit(0)
-
-
-lutfile = sys.argv[1]
-spxfile = sys.argv[2]
-namelist = []
-for i, arg in enumerate(sys.argv):
-    if i > 2:
-        namelist.append(arg)
-atlasname = " ".join(namelist)
-
-lutarr = np.genfromtxt(lutfile, str)
-lutarr = lutarr[:,:2]
-maxid = 0
-maxname = 0
-for row in lutarr:
-    if len(row[0]) > maxid:
-        maxid = len(row[0])
-    if len(row[1]) > maxname:
-        maxname = len(row[1])
-leftbar = max(maxid, 3)
-rightbar = max(maxname, 20)
-
-fid = open(spxfile, "w")
-
-fid.write(".. _%s:\n\n" % os.path.splitext(os.path.split(spxfile)[1])[0])
-fid.write("%s\n" % atlasname)
-for i in range(len(atlasname)):
-    fid.write("-")
-fid.write("\n\n")
-leftline = ""
-for i in range(leftbar):
-    leftline = "".join([leftline, "="])
-rightline = ""
-for i in range(rightbar):
-    rightline = "".join([rightline, "="])
-fid.write("%s   %s\nID     Region\n%s   %s\n" % (leftline, rightline, leftline, rightline))
-for row in lutarr:
-    name = row[1]
-    if not re.match("[rR](h|ight|\-).*", name) and not re.match("[Uu]nknown", name):
-        id = row[0][-3:]
-        if len(id) > 3:
-            id = int(id[-3:])
-        else:
-            id = int(id)
-        m = re.match("(([lL])(h|eft|\-)(\-*))(.*)", name)
-        if m:
-            name = name[len(m.group(1)):].capitalize()
-        space = ""
-        for i in range(7-len(str(id))):
-            space = "".join([space, " "])
-        fid.write("%d%s%s\n" % (id, space, name))
-
-fid.write("%s   %s\n\n" % (leftline, rightline))
diff --git a/doc/utils/make_clean_config.py b/doc/utils/make_clean_config.py
deleted file mode 100755
index ce35036..0000000
--- a/doc/utils/make_clean_config.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /usr/bin/env python
-
-import os
-
-fullfid = open(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir,
-                            "data", "configfiles", "full_configbase.py"), "r")
-cleanfid = open(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir,
-                            "data", "configfiles", "clean_configbase.py"), "w")
-
-write = True
-lastflip = None
-
-def flip(value):
-    if value:
-        return False
-    else:
-        return True
-
-for num, line in enumerate(fullfid):
-
-    if not line.startswith("#--"):
-        if line.startswith("\"\"\"") and num > 15:
-            write = flip(write)
-            lastflip = num
-
-        if write and not lastflip == num:
-            cleanfid.write(line)
-
-fullfid.close()
-cleanfid.close()
diff --git a/doc/source/whats_new.rst b/doc/whats_new.rst
similarity index 51%
rename from doc/source/whats_new.rst
rename to doc/whats_new.rst
index 23919c5..0d980e2 100644
--- a/doc/source/whats_new.rst
+++ b/doc/whats_new.rst
@@ -1,5 +1,401 @@
 What's new
 ==========
+..
+    Note, we are now using links to highlight new functions and classes.
+    Please be sure to follow the examples below like :func:`mne.stats.f_mway_rm`, so the whats_new page will have a link to the function/class documentation.
+
+.. _changes_0_10:
+
+Version 0.10
+------------
+
+Changelog
+~~~~~~~~~
+
+    - Add support for generalized M-way repeated measures ANOVA for fully balanced designs with :func:`mne.stats.f_mway_rm` by `Denis Engemann`_
+
+    - Add epochs browser to interactively view and manipulate epochs with :func:`mne.viz.plot_epochs` by `Jaakko Leppakangas`_
+
+    - Speed up TF-MxNE inverse solver with block coordinate descent by `Daniel Strohmeier`_ and `Yousra Bekhti`_
+
+    - Speed up zero-phase overlap-add (default) filtering by a factor of up to 2 using linearity by `Ross Maddox`_ and `Eric Larson`_
+
+    - Add support for scaling and adjusting the number of channels/time per view by `Jaakko Leppakangas`_
+
+    - Add support to toggle the show/hide state of all sections with a single keypress ('t') in :class:`mne.report.Report` by `Mainak Jas`_
+
+    - Add support for BEM model creation :func:`mne.make_bem_model` by `Eric Larson`_
+
+    - Add support for BEM solution computation :func:`mne.make_bem_solution` by `Eric Larson`_
+
+    - Add ICA plotters for raw and epoch components by `Jaakko Leppakangas`_
+
+    - Add new object :class:`mne.decoding.TimeDecoding` for decoding sensors' evoked response across time by `Jean-Remi King`_
+
+    - Add command ``mne freeview_bem_surfaces`` to quickly check BEM surfaces with Freeview by `Alex Gramfort`_.
+
+    - Add support for splitting epochs into multiple files in :func:`mne.Epochs.save` by `Mainak Jas`_ and `Alex Gramfort`_
+
+    - Add support for jointly resampling a raw object and event matrix to avoid issues with resampling status channels by `Marijn van Vliet`_
+
+    - Add new class :class:`mne.preprocessing.Xdawn` for denoising and decoding of ERP/ERF by `Alexandre Barachant`_
+
+    - Add support for plotting patterns/filters in :class:`mne.decoding.csp.CSP` and :class:`mne.decoding.base.LinearModel` by `Romain Trachel`_
+
+    - Add new object :class:`mne.decoding.base.LinearModel` for decoding M/EEG data and interpreting coefficients of linear models with patterns attribute by `Romain Trachel`_ and `Alex Gramfort`_
+
+    - Add support to append new channels to an object from a list of other objects by `Chris Holdgraf`_
+
+    - Add interactive plotting of topomap from time-frequency representation by `Jaakko Leppakangas`_
+
+    - Add ``plot_topo`` method to ``Evoked`` object by `Jaakko Leppakangas`_
+
+    - Add fetcher :mod:`mne.datasets.brainstorm` for datasets used by Brainstorm in their tutorials by `Mainak Jas`_
+
+    - Add interactive plotting of single trials by right clicking on channel name in epochs browser by `Jaakko Leppakangas`_
+
+    - New logos and logo generation script by `Daniel McCloy`_
+
+    - Add ability to plot topomap with a "skirt" (channels outside of the head circle) by `Marijn van Vliet`_
+
+    - Add multiple options to ICA infomax and extended infomax algorithms (number of subgaussian components, computation of bias, iteration status printing), enabling equivalent computations to those performed by EEGLAB by `Jair Montoya Martinez`_
+
+    - Add :func:`mne.Epochs.apply_baseline` method to ``Epochs`` objects by `Teon Brooks`_
+
+    - Add ``preload`` argument to :func:`mne.read_epochs` to enable on-demand reads from disk by `Eric Larson`_ (see the sketch after this list)
+
+    - Big rewrite of simulation module by `Yousra Bekhti`_, `Mark Wronkiewicz`_, `Eric Larson`_ and `Alex Gramfort`_. It allows simulating raw data with artifacts (ECG, EOG) and evoked data based on the forward solution. See :func:`mne.simulation.simulate_raw`, :func:`mne.simulation.simulate_evoked` and :func:`mne.simulation.simulate_sparse_stc`
+
+    - Add :func:`mne.Epochs.load_data` method to :class:`mne.Epochs` by `Teon Brooks`_
+
+    - Add support for drawing topomaps by selecting an area in :func:`mne.Evoked.plot` by `Jaakko Leppakangas`_
+
+    - Add support for finding peaks in evoked data in :func:`mne.Evoked.plot_topomap` by `Jona Sassenhagen`_ and `Jaakko Leppakangas`_
+
+    - Add source space morphing in :func:`morph_source_spaces` and :func:`SourceEstimate.to_original_src` by `Eric Larson`_ and `Denis Engemann`_
+
+    - Adapt ``corrmap`` function (Viola et al. 2009) to semi-automatically detect similar ICs across data sets by `Jona Sassenhagen`_, `Denis Engemann`_ and `Eric Larson`_
+
+    - New ``mne flash_bem`` command to compute BEM surfaces from Flash MRI images by `Lorenzo Desantis`_, `Alex Gramfort`_ and `Eric Larson`_. See :func:`mne.bem.utils.make_flash_bem`.
+
+    - New ``gfp`` parameter in the :func:`mne.Evoked.plot` method to display Global Field Power (GFP) by `Eric Larson`_.
+
+    - Add :func:`mne.report.Report.add_slider_to_section` method to :class:`mne.report.Report` by `Teon Brooks`_
+
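+As a quick illustration of the new on-demand epochs I/O described above (a
+minimal sketch; the epochs filename is hypothetical)::
+
+    import mne
+
+    epochs = mne.read_epochs('sample-epo.fif', preload=False)  # lazy read
+    epochs.load_data()                # new explicit loading method
+    epochs.apply_baseline((None, 0))  # new baseline-correction method
+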
+BUG
+~~~
+
+    - Fix ``mne.io.add_reference_channels`` not setting ``info['nchan']`` correctly by `Federico Raimondo`_
+
+    - Fix ``mne.stats.bonferroni_correction`` reject mask output to use corrected p-values by `Denis Engemann`_
+
+    - Fix FFT filter artifacts when using short windows in overlap-add by `Eric Larson`_
+
+    - Fix picking channels from forward operator could return a channel ordering different from ``info['chs']`` by `Chris Bailey`_
+
+    - Fix dropping of events after downsampling stim channels by `Marijn van Vliet`_
+
+    - Fix scaling in ``mne.viz.utils._setup_vmin_vmax`` by `Jaakko Leppakangas`_
+
+API
+~~~
+
+    - Rename ``mne.viz.plot_topo`` to ``mne.viz.plot_evoked_topo`` and deprecate the old name by `Jaakko Leppakangas`_
+
+    - Deprecated :class:`mne.decoding.transformer.ConcatenateChannels` and replaced it with :class:`mne.decoding.transformer.EpochsVectorizer` by `Romain Trachel`_
+
+    - Deprecated the ``lws`` option and renamed it to ``ledoit_wolf`` for the ``reg`` argument in :class:`mne.decoding.csp.CSP` by `Romain Trachel`_
+
+    - Redesigned and rewrote :func:`mne.Epochs.plot` (no backwards compatibility) during the GSOC 2015 by `Jaakko Leppakangas`_, `Mainak Jas`_, `Federico Raimondo`_ and `Denis Engemann`_
+
+    - Deprecated and renamed :func:`mne.viz.plot_image_epochs` to :func:`mne.viz.plot_epochs_image` by `Teon Brooks`_
+
+    - ``picks`` argument has been added to :func:`mne.time_frequency.tfr_morlet`, :func:`mne.time_frequency.tfr_multitaper` by `Teon Brooks`_
+
+    - :func:`mne.io.Raw.preload_data` has been deprecated in favor of :func:`mne.io.Raw.load_data` by `Teon Brooks`_ (see the sketch after this list)
+
+    - ``RawBrainVision`` objects now always have event channel ``'STI 014'``, and recordings with no events will have this channel set to zero by `Eric Larson`_
+
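+Code written against the renamed APIs can be updated as in this minimal
+sketch (the raw filename is hypothetical)::
+
+    import mne
+
+    raw = mne.io.Raw('sample_raw.fif', preload=False)
+    raw.load_data()  # replaces the deprecated raw.preload_data()
+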
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number of commits):
+
+   269  Eric Larson
+   243  Jaakko Leppakangas
+   192  Alexandre Gramfort
+   128  Denis A. Engemann
+   111  Jona Sassenhagen
+   107  Mark Wronkiewicz
+    97  Teon Brooks
+    81  Lorenzo De Santis
+    55  Yousra Bekhti
+    54  Jean-Remi King
+    48  Romain Trachel
+    45  Mainak Jas
+    40  Alexandre Barachant
+    32  Marijn van Vliet
+    27  jaeilepp
+    26  jmontoyam
+    22  Chris Holdgraf
+    16  Christopher J. Bailey
+     7  Christian Brodbeck
+     5  Alan Leggitt
+     5  Roan LaPlante
+     5  natalieklein
+     3  Daniel Strohmeier
+     3  Fede Raimondo
+     3  unknown
+     2  Dan G. Wakeman
+     2  Daniel McCloy
+     2  Fede
+     2  Ross Maddox
+     2  dgwakeman
+     2  sassenha
+     1  Jussi Nurminen
+     1  drammock
+     1  jona
+
+.. _changes_0_9:
+
+Version 0.9
+-----------
+
+Changelog
+~~~~~~~~~
+
+   - Add support for mayavi figures in ``add_section`` method in Report by `Mainak Jas`_
+
+   - Add support for extracting volumes of interest from FreeSurfer segmentations and setting them up as volume source spaces by `Alan Leggitt`_
+
+   - Add support to combine source spaces of different types by `Alan Leggitt`_
+
+   - Add support for source estimate for mixed source spaces by `Alan Leggitt`_
+
+   - Add ``SourceSpaces.save_as_volume`` method by `Alan Leggitt`_
+
+   - Automatically compute proper box sizes when generating layouts on the fly by `Marijn van Vliet`_
+
+   - Average evoked topographies across time points by `Denis Engemann`_
+
+   - Add option to Report class to save images as vector graphics (SVG) by `Denis Engemann`_
+
+   - Add events count to ``mne.viz.plot_events`` by `Denis Engemann`_
+
+   - Add support for stereotactic EEG (sEEG) channel type by `Marmaduke Woodman`_
+
+   - Add support for montage files by `Denis Engemann`_, `Marijn van Vliet`_, `Jona Sassenhagen`_, `Alex Gramfort`_ and `Teon Brooks`_
+
+   - Add support for spatiotemporal permutation clustering on sensors by `Denis Engemann`_
+
+   - Add support for multitaper time-frequency analysis by `Hari Bharadwaj`_
+
+   - Add Stockwell (S) transform for time-frequency representations by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add reading and writing support for time-frequency data (AverageTFR objects) by `Denis Engemann`_
+
+   - Add reading and writing support for digitizer data, and function for adding dig points to info by `Teon Brooks`_
+
+   - Add  ``plot_projs_topomap`` method to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_
+
+   - Add EEG (based on spherical splines) and MEG (based on field interpolation) bad channel interpolation method to ``Raw``, ``Epochs`` and ``Evoked`` objects
+     by `Denis Engemann`_ and `Mainak Jas`_
+
+   - Add parameter to ``whiten_evoked``, ``compute_whitener`` and ``prepare_noise_cov`` to set the exact rank by `Martin Luessi`_ and `Denis Engemann`_
+
+   - Add fiff I/O for processing history and MaxFilter info by `Denis Engemann`_ and `Eric Larson`_
+
+   - Add automated regularization with support for multiple sensor types to ``compute_covariance`` by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add ``Evoked.plot_white`` method to diagnose the quality of the estimated noise covariance and its impact on spatial whitening by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Add ``mne.evoked.grand_average`` function to compute grand average of Evoked data while interpolating bad EEG channels if necessary by `Mads Jensen`_ and `Alex Gramfort`_
+
+   - Improve EEG referencing support and add support for bipolar referencing by `Marijn van Vliet`_ and `Alex Gramfort`_
+
+   - Enable TFR calculation on Evoked objects by `Eric Larson`_
+
+   - Add support for combining Evoked datasets with arbitrary weights (e.g., for oddball paradigms) by `Eric Larson`_ and `Alex Gramfort`_
+
+   - Add support for concatenating a list of Epochs objects by `Denis Engemann`_
+
+   - Labels support subtraction (``label_1 - label_2``) by `Christian Brodbeck`_
+
+   - Add GeneralizationAcrossTime object with support for cross-condition generalization by `Jean-Remi King`_ and `Denis Engemann`_
+
+   - Add support for single dipole fitting by `Eric Larson`_
+
+   - Add support for spherical models in forward calculations by `Eric Larson`_
+
+   - Add support for SNR estimation by `Eric Larson`_
+
+   - Add support for Savitzky-Golay filtering of Evoked and Epochs by `Eric Larson`_
+
+   - Add support for adding an empty reference channel to data by `Teon Brooks`_
+
+   - Add reader function ``mne.io.read_raw_fif`` for Raw FIF files by `Teon Brooks`_
+
+   - Add example of creating MNE objects from arbitrary data and NEO files by `Jaakko Leppakangas`_
+
+   - Add ``plot_psd`` and ``plot_psd_topomap`` methods to epochs by `Yousra Bekhti`_, `Eric Larson`_ and `Denis Engemann`_
+
+   - ``evoked.pick_types``, ``epochs.pick_types``, and ``tfr.pick_types`` added by `Eric Larson`_
+
+   - ``rename_channels`` and ``set_channel_types`` added as methods to ``Raw``, ``Epochs`` and ``Evoked`` objects by `Teon Brooks`_ (see the sketch after this list)
+
+   - Add RAP-MUSIC inverse method by `Yousra Bekhti`_ and `Alex Gramfort`_
+
+   - Add ``evoked.as_type`` to  allow remapping data in MEG channels to virtual magnetometer or gradiometer channels by `Mainak Jas`_
+
+   - Add :func:`mne.report.Report.add_bem_to_section`, :func:`mne.report.Report.add_htmls_to_section` methods to :class:`mne.report.Report` by `Teon Brooks`_
+
+   - Add support for KIT epochs files with ``read_epochs_kit`` by `Teon Brooks`_
+
+   - Add whitening plots for evokeds to ``mne.Report`` by `Mainak Jas`_
+
+   - Add ``DigMontage`` class and reader to interface with digitization info by `Teon Brooks`_ and `Christian Brodbeck`_
+
+   - Add ``set_montage`` method to the ``Raw``, ``Epochs``, and ``Evoked`` objects by `Teon Brooks`_ and `Denis Engemann`_
+
+   - Add support for capturing sensor positions when clicking on an image by `Chris Holdgraf`_
+
+   - Add support for custom sensor positions when creating Layout objects by `Chris Holdgraf`_
+
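+As an illustration of the new channel utilities above, here is a minimal
+sketch (the channel names and data are made up)::
+
+    import numpy as np
+    import mne
+
+    # two fake EEG channels, one second of zeros at 100 Hz
+    info = mne.create_info(['Cz', 'EEG058'], sfreq=100.,
+                           ch_types=['eeg', 'eeg'])
+    raw = mne.io.RawArray(np.zeros((2, 100)), info)
+    raw.rename_channels({'EEG058': 'blink'})  # new rename_channels method
+    raw.set_channel_types({'blink': 'eog'})   # new set_channel_types method
+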
+BUG
+~~~
+
+   - Fix energy conservation for STFT with tight frames by `Daniel Strohmeier`_
+
+   - Fix incorrect data matrix when tfr was plotted with parameters ``tmin``, ``tmax``, ``fmin`` and ``fmax`` by `Mainak Jas`_
+
+   - Fix channel names in topomaps by `Alex Gramfort`_
+
+   - Fix mapping of ``l_trans_bandwidth`` (to low frequency) and ``h_trans_bandwidth`` (to high frequency) in ``_BaseRaw.filter`` by `Denis Engemann`_
+
+   - Fix scaling source spaces when distances have to be recomputed by `Christian Brodbeck`_
+
+   - Fix repeated samples in client to FieldTrip buffer by `Mainak Jas`_ and `Federico Raimondo`_
+
+   - Fix highpass and lowpass units read from Brainvision vhdr files by `Alex Gramfort`_
+
+   - Add missing attributes for BrainVision and KIT systems needed for resample by `Teon Brooks`_
+
+   - Fix file extensions of SSP projection files written by mne commands (from ``_proj.fif`` to ``-proj.fif``) by `Alex Gramfort`_
+
+   - Generating EEG layouts no longer requires digitization points by `Marijn van Vliet`_
+
+   - Add missing attributes to BTI, KIT, and BrainVision by `Eric Larson`_
+
+   - The API changes to the EDF, BrainVision, and EGI readers break backwards compatibility when importing EEG data by `Teon Brooks`_
+
+   - Fix bug in ``mne.viz.plot_topo`` if ylim was passed for single sensor layouts by `Denis Engemann`_
+
+   - Average reference projections will no longer be automatically added after applying a custom EEG reference by `Marijn van Vliet`_
+
+   - Fix picks argument to filter in n dimensions (affects FilterEstimator), and highpass filter in FilterEstimator by `Mainak Jas`_
+
+   - Fix beamformer code LCMV/DICS for CTF data with reference channels by `Denis Engemann`_ and `Alex Gramfort`_
+
+   - Fix scalings for bad EEG channels in ``mne.viz.plot_topo`` by `Marijn van Vliet`_
+
+   - Fix EGI reading when no events are present by `Federico Raimondo`_
+
+   - Add functionality to determine plot limits automatically or by data percentiles by `Mark Wronkiewicz`_
+
+   - Fix bug in ``mne.io.edf`` where the channel offsets were omitted in the voltage calculations by `Teon Brooks`_
+
+   - Decouple section ordering in command-line from python interface for mne-report by `Mainak Jas`_
+
+   - Fix bug with ICA resetting by `Denis Engemann`_
+
+API
+~~~
+
+   - ``apply_inverse`` functions have a new boolean parameter ``prepared``; ``prepare_inverse_operator`` is called only if it is ``False``, saving computation time when the operator has already been prepared
+
+   - ``find_events`` and ``read_events`` functions have a new parameter ``mask`` to set some bits to a don't-care state by `Teon Brooks`_ (see the sketch after this list)
+
+   - New channels module including layouts, electrode montages, and neighbor definitions of sensors which deprecates ``mne.layouts`` by `Denis Engemann`_
+
+   - ``read_raw_brainvision``, ``read_raw_edf``, ``read_raw_egi`` all use a standard montage import by `Teon Brooks`_
+
+   - Fix missing calibration factors for ``mne.io.egi.read_raw_egi`` by `Denis Engemann`_ and `Federico Raimondo`_
+
+   - Allow multiple filename patterns as a list (e.g., \*raw.fif and \*-eve.fif) to be parsed by mne report in ``Report.parse_folder()`` by `Mainak Jas`_
+
+   - ``read_hsp``, ``read_elp``, and ``write_hsp``, ``write_mrk`` were removed and made private by `Teon Brooks`_
+
+   - When computing the noise covariance or MNE inverse solutions, the rank is estimated empirically using more sensitive thresholds, which stabilizes results by `Denis Engemann`_ and `Eric Larson`_ and `Alex Gramfort`_
+
+   - Raw FIFF files can be preloaded after class instantiation using ``raw.preload_data()``
+
+   - Add ``label`` parameter to ``apply_inverse`` by `Teon Brooks`_
+
+   - Deprecated ``label_time_courses`` in favor of the ``in_label`` method in ``SourceEstimate`` by `Teon Brooks`_
+
+   - Deprecated ``as_data_frame`` for ``to_data_frame`` by `Chris Holdgraf`_
+
+   - Add ``transform``, ``unit`` parameters to ``read_montage`` by `Teon Brooks`_
+
+   - Deprecated ``fmin, fmid, fmax`` in stc.plot and added ``clim`` by `Mark Wronkiewicz`_
+
+   - Use ``scipy.signal.welch`` instead of ``matplotlib.psd`` inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_, `Eric Larson`_ and `Denis Engemann`_. As a consequence, ``Raw.plot_raw_psds`` has been deprecated.
+
+   - ``Raw`` instances returned by ``mne.forward.apply_forward_raw`` now always have times starting from
+     zero to be consistent with all other ``Raw`` instances. To get the former ``start`` and ``stop`` times,
+     use ``raw.first_samp / raw.info['sfreq']`` and ``raw.last_samp / raw.info['sfreq']``.
+
+   - ``pick_types_evoked`` has been deprecated in favor of ``evoked.pick_types``.
+
+   - Deprecated changing the sensor type of channels in ``rename_channels`` by `Teon Brooks`_
+
+   - CUDA is no longer initialized at module import, but only when first used.
+
+   - ``add_figs_to_section`` and ``add_images_to_section`` now have a ``textbox`` parameter to add comments to the image by `Teon Brooks`_
+
+   - Deprecated ``iir_filter_raw`` for ``fit_iir_model_raw``.
+
+   - Add ``montage`` parameter to the ``create_info`` function to create the info using montages by `Teon Brooks`_
+
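+For example, the new ``mask`` parameter can be used as in this minimal
+sketch (``raw`` is assumed to be an already-loaded ``Raw`` instance and the
+mask value is arbitrary)::
+
+    events = mne.find_events(raw, stim_channel='STI 014', mask=255)
+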
+Authors
+~~~~~~~~~
+
+The committer list for this release is the following (preceded by number of commits):
+
+   515  Eric Larson
+   343  Denis A. Engemann
+   304  Alexandre Gramfort
+   300  Teon Brooks
+   142  Mainak Jas
+   119  Jean-Remi King
+    77  Alan Leggitt
+    75  Marijn van Vliet
+    63  Chris Holdgraf
+    57  Yousra Bekhti
+    49  Mark Wronkiewicz
+    44  Christian Brodbeck
+    30  Jona Sassenhagen
+    29  Hari Bharadwaj
+    27  Clément Moutard
+    24  Ingoo Lee
+    18  Marmaduke Woodman
+    16  Martin Luessi
+    10  Jaakko Leppakangas
+     9  Andrew Dykstra
+     9  Daniel Strohmeier
+     7  kjs
+     6  Dan G. Wakeman
+     5  Federico Raimondo
+     3  Basile Pinsard
+     3  Christoph Dinh
+     3  Hafeza Anevar
+     2  Martin Billinger
+     2  Roan LaPlante
+     1  Manoj Kumar
+     1  Matt Tucker
+     1  Romain Trachel
+     1  mads jensen
+     1  sviter
 
 .. _changes_0_8:
 
@@ -11,25 +407,25 @@ Changelog
 
    - Add Python3 support by `Nick Ward`_, `Alex Gramfort`_, `Denis Engemann`_, and `Eric Larson`_
 
-   - Add `get_peak` method for evoked and stc objects by  `Denis Engemann`_
+   - Add ``get_peak`` method for evoked and stc objects by  `Denis Engemann`_
 
-   - Add `iter_topography` function for radically simplified custom sensor topography plotting by `Denis Engemann`_
+   - Add ``iter_topography`` function for radically simplified custom sensor topography plotting by `Denis Engemann`_
 
    - Add field line interpolation by `Eric Larson`_
 
-   - Add full provenance tacking for epochs and improve `drop_log` by `Tal Linzen`_, `Alex Gramfort`_ and `Denis Engemann`_
+   - Add full provenance tracking for epochs and improve ``drop_log`` by `Tal Linzen`_, `Alex Gramfort`_ and `Denis Engemann`_
 
-   - Add systematic contains method to Raw, Epochs and Evoked for channel type membership testing by `Denis Engemann`_
+   - Add systematic contains method to ``Raw``, ``Epochs`` and ``Evoked`` for channel type membership testing by `Denis Engemann`_
 
    - Add fiff unicode writing and reading support by `Denis Engemann`_
 
    - Add 3D MEG/EEG field plotting function and evoked method by `Denis Engemann`_ and  `Alex Gramfort`_
 
-   - Add consistent channel-dropping methods to Raw, Epochs and Evoked by `Denis Engemann`_ and  `Alex Gramfort`_
+   - Add consistent channel-dropping methods to ``Raw``, ``Epochs`` and ``Evoked`` by `Denis Engemann`_ and  `Alex Gramfort`_
 
-   - Add `equalize_channnels` function to set common channels for a list of Raw, Epochs, or Evoked objects by `Denis Engemann`_
+   - Add ``equalize_channels`` function to set common channels for a list of ``Raw``, ``Epochs``, or ``Evoked`` objects by `Denis Engemann`_
 
-   - Add `plot_events` function to visually display paradigm by `Alex Gramfort`_
+   - Add ``plot_events`` function to visually display paradigm by `Alex Gramfort`_
 
    - Improved connectivity circle plot by `Martin Luessi`_
 
@@ -39,21 +435,21 @@ Changelog
 
    - Add ability to add patch information to source spaces by `Eric Larson`_
 
-   - Add `split_label` function to divide labels into multiple parts by `Christian Brodbeck`_
+   - Add ``split_label`` function to divide labels into multiple parts by `Christian Brodbeck`_
 
-   - Add `color` attribute to `Label` objects by `Christian Brodbeck`_
+   - Add ``color`` attribute to ``Label`` objects by `Christian Brodbeck`_
 
-   - Add 'max' mode for extract_label_time_course by `Mads Jensen`_
+   - Add ``max`` mode for ``extract_label_time_course`` by `Mads Jensen`_
 
-   - Add `rename_channels` function to change channel names and types in info object by `Dan Wakeman`_ and `Denis Engemann`_
+   - Add ``rename_channels`` function to change channel names and types in info object by `Dan Wakeman`_ and `Denis Engemann`_
 
-   - Add  `compute_ems` function to extract the time course of experimental effects by `Denis Engemann`_, `Sébastien Marti`_ and `Alex Gramfort`_
+   - Add  ``compute_ems`` function to extract the time course of experimental effects by `Denis Engemann`_, `Sébastien Marti`_ and `Alex Gramfort`_
 
-   - Add option to expand Labels defined in a source space to the original surface (`Label.fill()`) by `Christian Brodbeck`_
+   - Add option to expand Labels defined in a source space to the original surface (``Label.fill()``) by `Christian Brodbeck`_
 
   - GUIs can be invoked from the command line using `$ mne coreg` and `$ mne kit2fiff` by `Christian Brodbeck`_
 
-   - Add `add_channels_epochs` function to combine different recordings at the Epochs level by `Christian Brodbeck`_ and `Denis Engemann`_
+   - Add ``add_channels_epochs`` function to combine different recordings at the Epochs level by `Christian Brodbeck`_ and `Denis Engemann`_
 
    - Add support for EGI Netstation simple binary files by `Denis Engemann`_
 
@@ -67,29 +463,29 @@ Changelog
 
    - Add color and event_id with legend options in plot_events in viz.py by `Cathy Nangini`_
 
-   - Add `events_list` parameter to `mne.concatenate_raws` to concatenate events corresponding to runs by `Denis Engemann`_
+   - Add ``events_list`` parameter to ``mne.concatenate_raws`` to concatenate events corresponding to runs by `Denis Engemann`_
 
-   - Add `read_ch_connectivity` function and `ch_neighbor_connectivity` to read FieldTrip neighbor template .mat files and compute between sensor adjacency matrices by `Denis Engemann`_
+   - Add ``read_ch_connectivity`` function to read FieldTrip neighbor template .mat files and obtain sensor adjacency matrices by `Denis Engemann`_
 
    - Add display of head in helmet from -trans.fif file to check coregistration quality by `Mainak Jas`_
 
-   - Add `raw.add_events` to allow adding events to a raw file by `Eric Larson`_
+   - Add ``raw.add_events`` to allow adding events to a raw file by `Eric Larson`_
 
-   - Add `plot_image` method to Evoked object to display data as images by `JR King`_ and `Alex Gramfort`_ and `Denis Engemann`_
+   - Add ``plot_image`` method to Evoked object to display data as images by `Jean-Remi King`_ and `Alex Gramfort`_ and `Denis Engemann`_
 
    - Add BCI demo with CSP on motor imagery by `Martin Billinger`_
 
-   - New ICA API with unified methods for processing Raw, Epochs and Evoked objects by `Denis Engemann`_
+   - New ICA API with unified methods for processing ``Raw``, ``Epochs`` and ``Evoked`` objects by `Denis Engemann`_
 
    - Apply ICA at the evoked stage by `Denis Engemann`_
 
    - New ICA methods for visualizing unmixing quality, artifact detection and rejection by `Denis Engemann`_
 
-   - Add 'pick_channels' and 'drop_channels' mixin class to pick and drop channels from Raw, Epochs, and Evoked objects by `Andrew Dykstra`_ and `Denis Engemann`_
+   - Add ``pick_channels`` and ``drop_channels`` mixin class to pick and drop channels from ``Raw``, ``Epochs``, and ``Evoked`` objects by `Andrew Dykstra`_ and `Denis Engemann`_
 
-   - Add 'EvokedArray' class to create an Evoked object from an array by 'Andrew Dykstra'_
+   - Add ``EvokedArray`` class to create an Evoked object from an array by `Andrew Dykstra`_
 
-   - Add `plot_bem` method to visualize BEM contours on MRI anatomical images by `Mainak Jas`_ and `Alex Gramfort`_
+   - Add ``plot_bem`` method to visualize BEM contours on MRI anatomical images by `Mainak Jas`_ and `Alex Gramfort`_
 
    - Add automated ECG detection using cross-trial phase statistics by `Denis Engemann`_ and `Juergen Dammers`_
 
@@ -101,13 +497,13 @@ Changelog
 
    - Add computation of point spread and cross-talk functions for MNE type solutions by `Alex Gramfort`_ and `Olaf Hauk`_
 
-   - Add mask parameter to `plot_evoked_topomap` and `evoked.plot_topomap` by `Denis Engemann`_ and `Alex Gramfort`_
+   - Add mask parameter to `plot_evoked_topomap` and ``evoked.plot_topomap`` by `Denis Engemann`_ and `Alex Gramfort`_
 
    - Add infomax and extended infomax ICA by `Denis Engemann`_, `Juergen Dammers`_ and `Lukas Breuer`_ and `Federico Raimondo`_
 
    - Aesthetically redesign interpolated topography plots by `Denis Engemann`_ and `Alex Gramfort`_
 
-   - Simplify sensor space time-frequency analysis API with `tfr_morlet` function by `Alex Gramfort`_ and `Denis Engemann`_
+   - Simplify sensor space time-frequency analysis API with ``tfr_morlet`` function by `Alex Gramfort`_ and `Denis Engemann`_
 
    - Add new somatosensory MEG dataset with nice time-frequency content by `Alex Gramfort`_
 
@@ -122,7 +518,7 @@ Changelog
 BUG
 ~~~
 
-   - Fix incorrect `times` attribute when stc was computed using `apply_inverse` after decimation at epochs stage for certain, arbitrary sample frequencies by `Denis Engemann`_
+   - Fix incorrect ``times`` attribute when stc was computed using ``apply_inverse`` after decimation at epochs stage for certain, arbitrary sample frequencies by `Denis Engemann`_
 
    - Fix corner case error for step-down-in-jumps permutation test (when step-down threshold was high enough to include all clusters) by `Eric Larson`_
 
@@ -143,21 +539,21 @@ API
 
    - Deprecate Epochs.drop_picks in favor of a new method called drop_channels
 
-   - Deprecate `labels_from_parc` and `parc_from_labels` in favor of `read_labels_from_annot` and `write_labels_to_annot`
+   - Deprecate ``labels_from_parc`` and ``parc_from_labels`` in favor of ``read_labels_from_annot`` and ``write_labels_to_annot``
 
-   - The default of the new add_dist option of `setup_source_space` to add patch information will change from False to True in MNE-Python 0.9
+   - The default of the new add_dist option of ``setup_source_space`` to add patch information will change from False to True in MNE-Python 0.9
 
-   - Deprecate `read_evoked` and `write_evoked` in favor of `read_evokeds` and `write_evokeds`. read_evokeds will return all Evoked instances in a file by default.
+   - Deprecate ``read_evoked`` and ``write_evoked`` in favor of ``read_evokeds`` and ``write_evokeds``. ``read_evokeds`` will return all ``Evoked`` instances in a file by default.
 
-   - Deprecate `setno` in favor of `condition` in the initialization of an Evoked instance. This affects 'mne.fiff.Evoked' and 'read_evokeds', but not 'read_evoked'.
+   - Deprecate ``setno`` in favor of ``condition`` in the initialization of an Evoked instance. This affects ``mne.fiff.Evoked`` and ``read_evokeds``, but not ``read_evoked``.
 
-   - Deprecate `mne.fiff` module, use `mne.io` instead e.g. `mne.io.Raw` instead of `mne.fiff.Raw`.
+   - Deprecate ``mne.fiff`` module, use ``mne.io`` instead e.g. ``mne.io.Raw`` instead of ``mne.fiff.Raw``.
 
-   - Pick functions (e.g., `pick_types`) are now in the mne namespace (e.g. use `mne.pick_types`).
+   - Pick functions (e.g., ``pick_types``) are now in the mne namespace (e.g. use ``mne.pick_types``).
 
-   - Deprecated ICA methods specfific to one container type. Use ICA.fit, ICA.get_sources ICA.apply and ICA.plot_XXX for processing Raw, Epochs and Evoked objects.
+   - Deprecated ICA methods specific to one container type. Use ``ICA.fit``, ``ICA.get_sources``, ``ICA.apply`` and ``ICA.plot_XXX`` for processing Raw, Epochs and Evoked objects.
 
-   - The default smoothing method for `mne.stc_to_label` will change in v0.9, and the old method is deprecated.
+   - The default smoothing method for ``mne.stc_to_label`` will change in v0.9, and the old method is deprecated.
 
   - By default, for ICA the maximum number of PCA components equals the number of channels passed. The number of PCA components used to reconstruct the sensor space signals now defaults to the maximum number of PCA components estimated.
 
@@ -219,7 +615,7 @@ Changelog
 
    - Decoding with Common Spatial Patterns (CSP) by `Romain Trachel`_ and `Alex Gramfort`_
 
-   - Add ICA plot_topomap function and method for displaying the spatial sensitivity of ICA sources by `Denis Engemann`_
+   - Add ICA ``plot_topomap`` function and method for displaying the spatial sensitivity of ICA sources by `Denis Engemann`_
 
    - Plotting multiple brain views at once by `Eric Larson`_
 
@@ -245,7 +641,7 @@ Changelog
 
    - Add `ico` and `oct` source space creation in native Python by `Eric Larson`_
 
-   - Add interactive rejection of bad trials in `plot_epochs` by `Denis Engemann`_
+   - Add interactive rejection of bad trials in ``plot_epochs`` by `Denis Engemann`_
 
    - Add morph map calculation by `Eric Larson`_ and `Martin Luessi`_
 
@@ -294,7 +690,7 @@ Changelog
 API
 ~~~
 
-   - The pick_normal parameter for minimum norm solvers has been renamed as `pick_ori` and normal orientation picking is now achieved by passing the value "normal" for the `pick_ori` parameter.
+   - The ``pick_normal`` parameter for minimum norm solvers has been renamed to ``pick_ori``; normal orientation picking is now achieved by passing the value "normal" for the ``pick_ori`` parameter.
 
   - ICA objects now expose the measurement info of the object fitted.
 
@@ -302,9 +698,9 @@ API
 
    - Removed deprecated read/write_stc/w, use SourceEstimate methods instead
 
-   - The `chs` argument in `mne.layouts.find_layout` is deprecated and will be removed in MNE-Python 0.9. Use `info` instead.
+   - The ``chs`` argument in ``mne.layouts.find_layout`` is deprecated and will be removed in MNE-Python 0.9. Use ``info`` instead.
 
-   - `plot_evoked` and `Epochs.plot` now open a new figure by default. To plot on an existing figure please specify the `axes` parameter.
+   - ``plot_evoked`` and ``Epochs.plot`` now open a new figure by default. To plot on an existing figure please specify the ``axes`` parameter.
 
 
 Authors
@@ -385,7 +781,7 @@ Changelog
 
    - Events now contain the pre-event stim channel value in the middle column, by `Christian Brodbeck`_
 
-   - New function `mne.find_stim_steps` for finding all steps in a stim channel by `Christian Brodbeck`_
+   - New function ``mne.find_stim_steps`` for finding all steps in a stim channel by `Christian Brodbeck`_
 
    - Get information about FIFF files using mne.fiff.show_fiff() by `Eric Larson`_
 
@@ -407,7 +803,7 @@ Changelog
 
    - Support selective parameter updating in functions taking dicts as arguments by `Denis Engemann`_
 
-   - New ICA method `sources_as_epochs` to create Epochs in ICA space by `Denis Engemann`_
+   - New ICA method ``sources_as_epochs`` to create Epochs in ICA space by `Denis Engemann`_
 
    - New method in Evoked and Epoch classes to shift time scale by `Mainak Jas`_
 
@@ -452,11 +848,11 @@ Changelog
 API
 ~~~
 
-   - Deprecated use of fiff.pick_types without specifying exclude -- use either [] (none), 'bads' (bad channels), or a list of string (channel names).
+   - Deprecated use of fiff.pick_types without specifying exclude -- use either ``[]`` (none), ``'bads'`` (bad channels), or a list of strings (channel names).
 
    - Depth bias correction in dSPM/MNE/sLORETA make_inverse_operator is now done like in the C code using only gradiometers if present, else magnetometers, and EEG if no MEG channels are present.
 
-   - Fixed-orientation inverse solutions need to be made using 'fixed=True' option (using non-surface-oriented forward solutions if no depth weighting is used) to maintain compatibility with MNE C code.
+   - Fixed-orientation inverse solutions need to be made using the ``fixed=True`` option (using non-surface-oriented forward solutions if no depth weighting is used) to maintain compatibility with MNE C code.
 
   - Raw.save() will overwrite an existing destination file only if overwrite=True is set.
 
@@ -711,8 +1107,8 @@ Authors
 The committer list for this release is the following (preceded by number
 of commits):
 
-    * 80  Alexandre Gramfort
-    * 51  Martin Luessi
+   * 80  Alexandre Gramfort
+   * 51  Martin Luessi
 
 Version 0.2
 -----------
@@ -748,28 +1144,28 @@ Authors
 The committer list for this release is the following (preceded by number
 of commits):
 
-    * 33  Alexandre Gramfort
-    * 12  Martin Luessi
-    *  2  Yaroslav Halchenko
-    *  1  Manfred Kitzbichler
+   * 33  Alexandre Gramfort
+   * 12  Martin Luessi
+   *  2  Yaroslav Halchenko
+   *  1  Manfred Kitzbichler
 
 .. _Alex Gramfort: http://alexandre.gramfort.net
 
-.. _Martin Luessi: http://www.nmr.mgh.harvard.edu/martinos/people/showPerson.php?people_id=1600
+.. _Martin Luessi: https://www.martinos.org/user/8245
 
 .. _Yaroslav Halchenko: http://www.onerussian.com/
 
 .. _Daniel Strohmeier: http://www.tu-ilmenau.de/bmti/fachgebiete/biomedizinische-technik/dipl-ing-daniel-strohmeier/
 
-.. _Eric Larson: http://faculty.washington.edu/larsoner/
+.. _Eric Larson: http://larsoner.com
 
 .. _Denis Engemann: https://github.com/dengemann
 
-.. _Christian Brodbeck: https://github.com/christianmbrodbeck
+.. _Christian Brodbeck: https://github.com/christianbrodbeck
 
 .. _Simon Kornblith: http://simonster.com
 
-.. _Teon Brooks: https://files.nyu.edu/tlb331/public/
+.. _Teon Brooks: http://sites.google.com/a/nyu.edu/teon/
 
 .. _Mainak Jas: http://ltl.tkk.fi/wiki/Mainak_Jas
 
@@ -777,7 +1173,7 @@ of commits):
 
 .. _Andrew Dykstra: https://github.com/adykstra
 
-.. _Romain Trachel: http://www-sop.inria.fr/athena/Site/RomainTrachel
+.. _Romain Trachel: http://www.lscp.net/braware/trachelBr.html
 
 .. _Christopher Dinh: https://github.com/chdinh
 
@@ -787,7 +1183,7 @@ of commits):
 
 .. _Roan LaPlante: https://github.com/aestrivex
 
-.. _Mads Jensen: http://cnru.dk/people/mads-jensen
+.. _Mads Jensen: https://github.com/MadsJensen
 
 .. _Dan Wakeman: https://github.com/dgwakeman
 
@@ -799,7 +1195,7 @@ of commits):
 
 .. _Cathy Nangini: https://github.com/KatiRG
 
-.. _JR King: https://github.com/kingjr
+.. _Jean-Remi King: https://github.com/kingjr
 
 .. _Juergen Dammers: https://github.com/jdammers
 
@@ -808,3 +1204,37 @@ of commits):
 .. _Lukas Breuer: http://www.researchgate.net/profile/Lukas_Breuer
 
 .. _Federico Raimondo: https://github.com/fraimondo
+
+.. _Alan Leggitt: https://github.com/leggitta
+
+.. _Marijn van Vliet: https://github.com/wmvanvliet
+
+.. _Marmaduke Woodman: https://github.com/maedoc
+
+.. _Jona Sassenhagen: https://github.com/jona-sassenhagen
+
+.. _Hari Bharadwaj: http://www.haribharadwaj.com
+
+.. _Chris Holdgraf: http://chrisholdgraf.com
+
+.. _Jaakko Leppakangas: https://github.com/jaeilepp
+
+.. _Yousra Bekhti: https://www.linkedin.com/pub/yousra-bekhti/56/886/421
+
+.. _Mark Wronkiewicz: http://ilabs.washington.edu/graduate-students/bio/i-labs-mark-wronkiewicz
+
+.. _Sébastien Marti: http://www.researchgate.net/profile/Sebastien_Marti
+
+.. _Chris Bailey: https://github.com/cjayb
+
+.. _Ross Maddox: http://faculty.washington.edu/rkmaddox/
+
+.. _Alexandre Barachant: http://alexandre.barachant.org
+
+.. _Daniel McCloy: http://dan.mccloy.info
+
+.. _Jair Montoya Martinez: https://github.com/jmontoyam
+
+.. _Samu Taulu: http://ilabs.washington.edu/institute-faculty/bio/i-labs-samu-taulu-dsc
+
+.. _Lorenzo Desantis: https://github.com/lorenzo-desantis/
diff --git a/examples/README.txt b/examples/README.txt
index aebe569..bbefdad 100644
--- a/examples/README.txt
+++ b/examples/README.txt
@@ -1,6 +1,9 @@
+Examples Gallery
+================
 
-General examples
--------------------
-
-General-purpose and introductory examples to MNE.
+.. contents:: Contents
+   :local:
+   :depth: 2
 
+Introductory Examples
+---------------------
diff --git a/examples/connectivity/plot_cwt_sensor_connectivity.py b/examples/connectivity/plot_cwt_sensor_connectivity.py
index 2109194..5529cb5 100644
--- a/examples/connectivity/plot_cwt_sensor_connectivity.py
+++ b/examples/connectivity/plot_cwt_sensor_connectivity.py
@@ -12,19 +12,19 @@ domain using Morlet wavelets and the debiased Squared Weighted Phase Lag Index
     physiological data in the presence of volume-conduction, noise and
     sample-size bias" NeuroImage, vol. 55, no. 4, pp. 1548-1565, Apr. 2011.
 """
-
 # Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+
 import mne
 from mne import io
 from mne.connectivity import spectral_connectivity, seed_target_indices
 from mne.datasets import sample
-from mne.viz import plot_topo_tfr
+from mne.time_frequency import AverageTFR
+
+print(__doc__)
 
 ###############################################################################
 # Set parameters
@@ -46,7 +46,8 @@ picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
 # Create epochs for left-visual condition
 event_id, tmin, tmax = 3, -0.2, 0.5
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
+                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
+                    preload=True)
 
 # Use 'MEG 2343' as seed
 seed_ch = 'MEG 2343'
@@ -63,7 +64,8 @@ cwt_n_cycles = cwt_frequencies / 7.
 
 # Run the connectivity analysis using 2 parallel jobs
 sfreq = raw.info['sfreq']  # the sampling frequency
-con, freqs, times, _, _ = spectral_connectivity(epochs, indices=indices,
+con, freqs, times, _, _ = spectral_connectivity(
+    epochs, indices=indices,
     method='wpli2_debiased', mode='cwt_morlet', sfreq=sfreq,
     cwt_frequencies=cwt_frequencies, cwt_n_cycles=cwt_n_cycles, n_jobs=2)
 
@@ -71,9 +73,9 @@ con, freqs, times, _, _ = spectral_connectivity(epochs, indices=indices,
 con[np.where(indices[1] == seed)] = 1.0
 
 # Show topography of connectivity from seed
-import matplotlib.pyplot as plt
 title = 'WPLI2 - Visual - Seed %s' % seed_ch
 
 layout = mne.find_layout(epochs.info, 'meg')  # use full layout
-plot_topo_tfr(epochs, con, freqs, layout=layout, title=title)
-plt.show()
+
+tfr = AverageTFR(epochs.info, con, times, freqs, len(epochs))
+tfr.plot_topo(fig_facecolor='w', font_color='k', border='k')
diff --git a/examples/connectivity/plot_mne_inverse_coherence_epochs.py b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
index 03d8bd6..d093572 100644
--- a/examples/connectivity/plot_mne_inverse_coherence_epochs.py
+++ b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
@@ -5,17 +5,15 @@ Compute coherence in source space using a MNE inverse solution
 
 This examples computes the coherence between a seed in the left
 auditory cortex and the rest of the brain based on single-trial
-MNE-dSPM inverse soltions.
+MNE-dSPM inverse solutions.
 
 """
-
 # Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
@@ -23,6 +21,7 @@ from mne.minimum_norm import (apply_inverse, apply_inverse_epochs,
                               read_inverse_operator)
 from mne.connectivity import seed_target_indices, spectral_connectivity
 
+print(__doc__)
 
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
@@ -66,8 +65,8 @@ stc_label = stc.in_label(label_lh)
 
 # Find number and index of vertex with most power
 src_pow = np.sum(stc_label.data ** 2, axis=1)
-seed_vertno = stc_label.vertno[0][np.argmax(src_pow)]
-seed_idx = np.searchsorted(stc.vertno[0], seed_vertno)  # index in original stc
+seed_vertno = stc_label.vertices[0][np.argmax(src_pow)]
+seed_idx = np.searchsorted(stc.vertices[0], seed_vertno)  # index in orig stc
 
 # Generate index parameter for seed-based connectivity analysis
 n_sources = stc.data.shape[0]
@@ -94,8 +93,8 @@ sfreq = raw.info['sfreq']  # the sampling frequency
 # lower variance but is slower). By using faverage=True, we directly
 # average the coherence in the alpha and beta band, i.e., we will only
 # get 2 frequency bins
-coh, freqs, times, n_epochs, n_tapers = spectral_connectivity(stcs,
-    method='coh', mode='fourier', indices=indices,
+coh, freqs, times, n_epochs, n_tapers = spectral_connectivity(
+    stcs, method='coh', mode='fourier', indices=indices,
     sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, n_jobs=2)
 
 print('Frequencies in Hz over which coherence was averaged for alpha: ')
@@ -108,11 +107,12 @@ print(freqs[1])
 # Note: We use a hack to save the frequency axis as time
 tmin = np.mean(freqs[0])
 tstep = np.mean(freqs[1]) - tmin
-coh_stc = mne.SourceEstimate(coh, vertices=stc.vertno, tmin=1e-3 * tmin,
+coh_stc = mne.SourceEstimate(coh, vertices=stc.vertices, tmin=1e-3 * tmin,
                              tstep=1e-3 * tstep, subject='sample')
 
 # Now we can visualize the coherence using the plot method
-brain = coh_stc.plot('sample', 'inflated', 'rh', fmin=0.25, fmid=0.4,
-                     fmax=0.65, time_label='Coherence %0.1f Hz',
-                     subjects_dir=subjects_dir)
+brain = coh_stc.plot('sample', 'inflated', 'both',
+                     time_label='Coherence %0.1f Hz',
+                     subjects_dir=subjects_dir,
+                     clim=dict(kind='value', lims=(0.25, 0.4, 0.65)))
 brain.show_view('lateral')
diff --git a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
index 472375e..c0908c9 100644
--- a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
+++ b/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py
@@ -6,12 +6,11 @@ Compute full spectrum source space connectivity between labels
 The connectivity is computed between 4 labels across the spectrum
 between 5 and 40 Hz.
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne.datasets import sample
@@ -19,6 +18,8 @@ from mne.io import Raw
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 from mne.connectivity import spectral_connectivity
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -65,11 +66,10 @@ label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
 fmin, fmax = 5., 40.
 sfreq = raw.info['sfreq']  # the sampling frequency
 
-con, freqs, times, n_epochs, n_tapers = spectral_connectivity(label_ts,
-        method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin,
-        fmax=fmax, mt_adaptive=True, n_jobs=2)
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
+    label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
+    fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=2)
 
-import matplotlib.pyplot as plt
 n_rows, n_cols = con.shape[:2]
 fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
 plt.suptitle('Between labels connectivity')
diff --git a/examples/connectivity/plot_mne_inverse_label_connectivity.py b/examples/connectivity/plot_mne_inverse_label_connectivity.py
index 8bb9a21..c60bea9 100644
--- a/examples/connectivity/plot_mne_inverse_label_connectivity.py
+++ b/examples/connectivity/plot_mne_inverse_label_connectivity.py
@@ -8,16 +8,15 @@ source space based on dSPM inverse solutions and a FreeSurfer cortical
 parcellation. The connectivity is visualized using a circular graph which
 is ordered based on the locations of the regions.
 """
-
 # Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
@@ -25,6 +24,8 @@ from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 from mne.connectivity import spectral_connectivity
 from mne.viz import circular_layout, plot_connectivity_circle
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -81,9 +82,9 @@ fmin = 8.
 fmax = 13.
 sfreq = raw.info['sfreq']  # the sampling frequency
 con_methods = ['pli', 'wpli2_debiased']
-con, freqs, times, n_epochs, n_tapers = spectral_connectivity(label_ts,
-        method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
-        fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=2)
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
+    label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
+    fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=2)
 
 # con is a 3D array, get the connectivity for the first (and only) freq. band
 # for each method
@@ -106,7 +107,7 @@ for name in lh_labels:
     label_ypos.append(ypos)
 
 # Reorder the labels based on their location
-lh_labels = [label for (ypos, label) in sorted(zip(label_ypos, lh_labels))]
+lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
 
 # For the right hemi
 rh_labels = [label[:-2] + 'rh' for label in lh_labels]
@@ -125,7 +126,6 @@ plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
                          node_angles=node_angles, node_colors=label_colors,
                          title='All-to-All Connectivity left-Auditory '
                                'Condition (PLI)')
-import matplotlib.pyplot as plt
 plt.savefig('circle.png', facecolor='black')
 
 # Plot connectivity for both methods in the same plot
diff --git a/examples/connectivity/plot_mne_inverse_psi_visual.py b/examples/connectivity/plot_mne_inverse_psi_visual.py
index 9f73eee..34889a6 100644
--- a/examples/connectivity/plot_mne_inverse_psi_visual.py
+++ b/examples/connectivity/plot_mne_inverse_psi_visual.py
@@ -19,21 +19,20 @@ References
 Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
 pp. 1-4, Jun. 2008.
 """
-
 # Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import numpy as np
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
 from mne.connectivity import seed_target_indices, phase_slope_index
-from mne.viz import mne_analyze_colormap
 
+print(__doc__)
 
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
@@ -94,8 +93,9 @@ fmax = 30.
 tmin_con = 0.
 sfreq = raw.info['sfreq']  # the sampling frequency
 
-psi, freqs, times, n_epochs, _ = phase_slope_index(comb_ts, mode='multitaper',
-    indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax, tmin=tmin_con)
+psi, freqs, times, n_epochs, _ = phase_slope_index(
+    comb_ts, mode='multitaper', indices=indices, sfreq=sfreq,
+    fmin=fmin, fmax=fmax, tmin=tmin_con)
 
 # Generate a SourceEstimate with the PSI. This is simple since we used a single
 # seed (inspect the indices variable to see how the PSI scores are arranged in
@@ -106,10 +106,9 @@ psi_stc = mne.SourceEstimate(psi, vertices=vertices, tmin=0, tstep=1,
 # Now we can visualize the PSI using the plot method. We use a custom colormap
 # to show signed values
 v_max = np.max(np.abs(psi))
-colormap = mne_analyze_colormap(limits=[0, v_max / 3, v_max])
 brain = psi_stc.plot(surface='inflated', hemi='lh',
                      time_label='Phase Slope Index (PSI)',
-                     subjects_dir=subjects_dir, colormap=colormap)
-brain.scale_data_colormap(fmin=-v_max, fmid=0., fmax=v_max, transparent=False)
+                     subjects_dir=subjects_dir,
+                     clim=dict(kind='percent', pos_lims=(95, 97.5, 100)))
 brain.show_view('medial')
 brain.add_label(fname_label, color='green', alpha=0.7)
diff --git a/examples/connectivity/plot_sensor_connectivity.py b/examples/connectivity/plot_sensor_connectivity.py
index 3800ddc..ddc4d7d 100644
--- a/examples/connectivity/plot_sensor_connectivity.py
+++ b/examples/connectivity/plot_sensor_connectivity.py
@@ -12,8 +12,6 @@ are used which produces strong connectivity in the right occipital sensors.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 from scipy import linalg
 
@@ -22,6 +20,8 @@ from mne import io
 from mne.connectivity import spectral_connectivity
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -37,7 +37,7 @@ raw.info['bads'] += ['MEG 2443']
 
 # Pick MEG gradiometers
 picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
-                        exclude='bads')
+                       exclude='bads')
 
 # Create epochs for the visual condition
 event_id, tmin, tmax = 3, -0.2, 0.5
@@ -49,10 +49,9 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
 fmin, fmax = 3., 9.
 sfreq = raw.info['sfreq']  # the sampling frequency
 tmin = 0.0  # exclude the baseline period
-con, freqs, times, n_epochs, n_tapers = spectral_connectivity(epochs,
-    method='pli', mode='multitaper', sfreq=sfreq,
-    fmin=fmin, fmax=fmax, faverage=True, tmin=tmin,
-    mt_adaptive=False, n_jobs=2)
+con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
+    epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax,
+    faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=2)
 
 # the epochs contain an EOG channel, which we remove now
 ch_names = epochs.ch_names
@@ -64,10 +63,7 @@ con = con[idx][:, idx]
 con = con[:, :, 0]
 
 # Now, visualize the connectivity in 3D
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
+from mayavi import mlab  # noqa
 
 mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
 
@@ -76,7 +72,7 @@ sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
 sens_loc = np.array(sens_loc)
 
 pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
-                    color=(0, 0, 1), opacity=0.5, scale_factor=0.01)
+                    color=(1, 1, 1), opacity=1, scale_factor=0.005)
 
 # Get the strongest connections
 n_con = 20  # show up to 20 connections
@@ -100,8 +96,11 @@ vmin = np.min(con_val)
 for val, nodes in zip(con_val, con_nodes):
     x1, y1, z1 = sens_loc[nodes[0]]
     x2, y2, z2 = sens_loc[nodes[1]]
-    mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
-                vmin=vmin, vmax=vmax, tube_radius=0.002)
+    points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
+                         vmin=vmin, vmax=vmax, tube_radius=0.001,
+                         colormap='RdBu')
+    points.module_manager.scalar_lut_manager.reverse_lut = True
+
 
 mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
 
diff --git a/examples/datasets/plot_brainstorm_data.py b/examples/datasets/plot_brainstorm_data.py
new file mode 100644
index 0000000..eca7453
--- /dev/null
+++ b/examples/datasets/plot_brainstorm_data.py
@@ -0,0 +1,74 @@
+"""
+============================
+Brainstorm tutorial datasets
+============================
+
+Here we compute the evoked from raw for the Brainstorm
+tutorial dataset. For comparison, see:
+http://neuroimage.usc.edu/brainstorm/Tutorials/MedianNerveCtf
+
+References
+----------
+.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
+       Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
+       Computational Intelligence and Neuroscience, vol. 2011, Article ID
+       879716, 13 pages, 2011. doi:10.1155/2011/879716
+"""
+
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+import mne
+from mne.datasets.brainstorm import bst_raw
+from mne.io import Raw
+
+print(__doc__)
+
+tmin, tmax, event_id = -0.1, 0.3, 2  # take right-hand somato
+reject = dict(mag=4e-12, eog=250e-6)
+
+data_path = bst_raw.data_path()
+
+raw_fname = data_path + '/MEG/bst_raw/' + \
+                        'subj001_somatosensory_20111109_01_AUX-f_raw.fif'
+raw = Raw(raw_fname, preload=True)
+raw.plot()
+
+# set EOG channel
+raw.set_channel_types({'EEG058': 'eog'})
+
+# show power line interference and remove it
+raw.plot_psd()
+raw.notch_filter(np.arange(60, 181, 60))
+
+events = mne.find_events(raw, stim_channel='UPPT001')
+
+# pick MEG channels
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
+                       exclude='bads')
+
+# Compute epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=(None, 0), reject=reject, preload=False)
+
+# compute evoked
+evoked = epochs.average()
+
+# remove physiological artifacts (eyeblinks, heartbeats) using SSP on baseline
+evoked.add_proj(mne.compute_proj_evoked(evoked.crop(tmax=0, copy=True)))
+evoked.apply_proj()
+
+# fix stim artifact
+mne.preprocessing.fix_stim_artifact(evoked)
+
+# correct delays due to hardware (stim artifact is at 4 ms)
+evoked.shift_time(-0.004)
+
+# plot the result
+evoked.plot()
+
+# show topomaps
+evoked.plot_topomap(times=np.array([0.016, 0.030, 0.060, 0.070]))
diff --git a/examples/datasets/plot_megsim_data.py b/examples/datasets/plot_megsim_data.py
index 425969b..47b6195 100644
--- a/examples/datasets/plot_megsim_data.py
+++ b/examples/datasets/plot_megsim_data.py
@@ -14,21 +14,22 @@ Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
 (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
 Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
 """
-print(__doc__)
 
 from mne import find_events, Epochs, pick_types, read_evokeds
 from mne.io import Raw
 from mne.datasets.megsim import load_data
 
+print(__doc__)
+
 condition = 'visual'  # or 'auditory' or 'somatosensory'
 
 # Load experimental RAW files for the visual condition
 raw_fnames = load_data(condition=condition, data_format='raw',
-                       data_type='experimental')
+                       data_type='experimental', verbose=True)
 
 # Load simulation evoked files for the visual condition
 evoked_fnames = load_data(condition=condition, data_format='evoked',
-                          data_type='simulation')
+                          data_type='simulation', verbose=True)
 
 raw = Raw(raw_fnames[0])
 events = find_events(raw, stim_channel="STI 014", shortest_event=1)
diff --git a/examples/datasets/plot_megsim_data_single_trial.py b/examples/datasets/plot_megsim_data_single_trial.py
index d561dc7..451e7e8 100644
--- a/examples/datasets/plot_megsim_data_single_trial.py
+++ b/examples/datasets/plot_megsim_data_single_trial.py
@@ -14,16 +14,17 @@ Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
 (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
 Realistic Simulated and Empirical Data. Neuroinformatics 10:141-158
 """
-print(__doc__)
 
 from mne import read_evokeds
 from mne.datasets.megsim import load_data
 
+print(__doc__)
+
 condition = 'visual'  # or 'auditory' or 'somatosensory'
 
 # Load experimental RAW files for the visual condition
 epochs_fnames = load_data(condition=condition, data_format='single-trial',
-                          data_type='simulation')
+                          data_type='simulation', verbose=True)
 
 # Take only 10 trials from the same simulation setup.
 epochs_fnames = [f for f in epochs_fnames if 'sim6_trial_' in f][:10]
diff --git a/examples/datasets/plot_spm_faces_dataset.py b/examples/datasets/plot_spm_faces_dataset.py
index b7dad75..fe6bef9 100644
--- a/examples/datasets/plot_spm_faces_dataset.py
+++ b/examples/datasets/plot_spm_faces_dataset.py
@@ -1,3 +1,4 @@
+# doc:slow-example
 """
 ==========================================
 From raw data to dSPM on SPM Faces dataset
@@ -9,14 +10,15 @@ Runs a full pipeline using MNE-Python:
 - forward model computation
 - source reconstruction using dSPM on the contrast : "faces - scrambled"
 
+Note that this example does quite a bit of processing, so even on a
+fast machine it can take about 10 minutes to complete.
 """
-print(__doc__)
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
+import os.path as op
 import matplotlib.pyplot as plt
 
 import mne
@@ -25,6 +27,7 @@ from mne.preprocessing import ICA, create_eog_epochs
 from mne import io
 from mne.minimum_norm import make_inverse_operator, apply_inverse
 
+print(__doc__)
 
 data_path = spm_face.data_path()
 subjects_dir = data_path + '/subjects'
@@ -85,10 +88,8 @@ noise_cov = mne.compute_covariance(epochs_cln, tmax=0)
 trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
                            'raw-trans.fif')
 
-maps = mne.make_field_map(evoked[0], trans_fname=trans_fname,
-                          subject='spm', subjects_dir=subjects_dir,
-                          n_jobs=1)
-
+maps = mne.make_field_map(evoked[0], trans_fname, subject='spm',
+                          subjects_dir=subjects_dir, n_jobs=1)
 
 evoked[0].plot_field(maps, time=0.170)
 
@@ -97,12 +98,15 @@ evoked[0].plot_field(maps, time=0.170)
 # Compute forward model
 
 # Make source space
-src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
-                             overwrite=True)
+src_fname = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
+if not op.isfile(src_fname):
+    src = mne.setup_source_space('spm', src_fname, spacing='oct6',
+                                 subjects_dir=subjects_dir, overwrite=True)
+else:
+    src = mne.read_source_spaces(src_fname)
 
-mri = trans_fname
 bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
-forward = mne.make_forward_solution(contrast.info, mri=mri, src=src, bem=bem)
+forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
 forward = mne.convert_forward_solution(forward, surf_ori=True)
 
 ###############################################################################
@@ -116,15 +120,11 @@ inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
                                          loose=0.2, depth=0.8)
 
 # Compute inverse solution on contrast
-stc = apply_inverse(contrast, inverse_operator, lambda2, method,
-                    pick_normal=False)
+stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None)
 # stc.save('spm_%s_dSPM_inverse' % contrast.comment)
 
-# plot constrast
-# Plot brain in 3D with PySurfer if available. Note that the subject name
-# is already known by the SourceEstimate stc object.
-brain = stc.plot(surface='inflated', hemi='both', subjects_dir=subjects_dir)
+# Plot contrast in 3D with PySurfer if available
+brain = stc.plot(hemi='both', subjects_dir=subjects_dir)
 brain.set_time(170.0)  # milliseconds
-brain.scale_data_colormap(fmin=4, fmid=6, fmax=8, transparent=True)
 brain.show_view('ventral')
 # brain.save_image('dSPM_map.png')
diff --git a/examples/decoding/plot_decoding_csp_eeg.py b/examples/decoding/plot_decoding_csp_eeg.py
index 6c409ae..60da01d 100644
--- a/examples/decoding/plot_decoding_csp_eeg.py
+++ b/examples/decoding/plot_decoding_csp_eeg.py
@@ -28,20 +28,19 @@ The data set is available at PhysioNet [3]
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 import numpy as np
 import matplotlib.pyplot as plt
 
-from mne import Epochs, pick_types
-from mne.io import concatenate_raws
-from mne.io.edf import read_raw_edf
+from mne import Epochs, pick_types, find_events
+from mne.channels import read_layout
+from mne.io import concatenate_raws, read_raw_edf
 from mne.datasets import eegbci
-from mne.event import find_events
 from mne.decoding import CSP
-from mne.layouts import read_layout
 
-###############################################################################
-## Set parameters and read data
+print(__doc__)
+
+# #############################################################################
+# # Set parameters and read data
 
 # avoid classification of evoked responses by using epochs that start 1s after
 # cue onset.
@@ -51,11 +50,11 @@ subject = 1
 runs = [6, 10, 14]  # motor imagery: hands vs feet
 
 raw_fnames = eegbci.load_data(subject, runs)
-raw_files = [read_raw_edf(f, tal_channel=-1, preload=True) for f in raw_fnames]
+raw_files = [read_raw_edf(f, preload=True) for f in raw_fnames]
 raw = concatenate_raws(raw_files)
 
-# strip channel names
-raw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]
+# strip channel names of "." characters
+raw.rename_channels(lambda x: x.strip('.'))
 
 # Apply band-pass filter
 raw.filter(7., 30., method='iir')
@@ -75,8 +74,8 @@ labels = epochs.events[:, -1] - 2
 ###############################################################################
 # Classification with linear discriminant analysis
 
-from sklearn.lda import LDA
-from sklearn.cross_validation import ShuffleSplit
+from sklearn.lda import LDA  # noqa
+from sklearn.cross_validation import ShuffleSplit  # noqa
 
 # Assemble a classifier
 svc = LDA()
@@ -89,8 +88,8 @@ epochs_data = epochs.get_data()
 epochs_data_train = epochs_train.get_data()
 
 # Use scikit-learn Pipeline with cross_val_score function
-from sklearn.pipeline import Pipeline
-from sklearn.cross_validation import cross_val_score
+from sklearn.pipeline import Pipeline  # noqa
+from sklearn.cross_validation import cross_val_score  # noqa
 clf = Pipeline([('CSP', csp), ('SVC', svc)])
 scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
 
@@ -140,6 +139,8 @@ for train_idx, test_idx in cv:
 
 # Plot scores over time
 w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
+
+plt.figure()
 plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
 plt.axvline(0, linestyle='--', color='k', label='Onset')
 plt.axhline(0.5, linestyle='-', color='k', label='Chance')
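+
+# For reference, a hedged sketch of how the sliding windows plotted above are
+# typically defined earlier in this example (the exact values here are
+# assumptions and may differ in the full file):
+#
+# sfreq = raw.info['sfreq']
+# w_length = int(sfreq * 0.5)   # running window of ~500 ms
+# w_step = int(sfreq * 0.1)     # advance the window by ~100 ms
+# w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)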
diff --git a/examples/decoding/plot_decoding_csp_space.py b/examples/decoding/plot_decoding_csp_space.py
index 6429e55..4bc8fe4 100644
--- a/examples/decoding/plot_decoding_csp_space.py
+++ b/examples/decoding/plot_decoding_csp_space.py
@@ -18,13 +18,14 @@ See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 import numpy as np
 
 import mne
 from mne import io
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -53,9 +54,9 @@ evoked = epochs.average()
 ###############################################################################
 # Decoding in sensor space using a linear SVM
 
-from sklearn.svm import SVC
-from sklearn.cross_validation import ShuffleSplit
-from mne.decoding import CSP
+from sklearn.svm import SVC  # noqa
+from sklearn.cross_validation import ShuffleSplit  # noqa
+from mne.decoding import CSP  # noqa
 
 n_components = 3  # pick some components
 svc = SVC(C=1, kernel='linear')
@@ -85,15 +86,15 @@ print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
 
 # Or use much more convenient scikit-learn cross_val_score function using
 # a Pipeline
-from sklearn.pipeline import Pipeline
-from sklearn.cross_validation import cross_val_score
+from sklearn.pipeline import Pipeline  # noqa
+from sklearn.cross_validation import cross_val_score  # noqa
 cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
 clf = Pipeline([('CSP', csp), ('SVC', svc)])
 scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
 print(scores.mean())  # should match results above
 
 # And using regularized CSP with the Ledoit-Wolf estimator
-csp = CSP(n_components=n_components, reg='lws')
+csp = CSP(n_components=n_components, reg='ledoit_wolf')
 clf = Pipeline([('CSP', csp), ('SVC', svc)])
 scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
 print(scores.mean())  # should get better results than above
diff --git a/examples/decoding/plot_decoding_sensors.py b/examples/decoding/plot_decoding_sensors.py
index 4a43644..984e3a2 100644
--- a/examples/decoding/plot_decoding_sensors.py
+++ b/examples/decoding/plot_decoding_sensors.py
@@ -8,16 +8,18 @@ data in sensor space. Here the classifier is applied to every time
 point.
 """
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Jean-Remi King <jeanremi.king at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 import matplotlib.pyplot as plt
-import numpy as np
 
 import mne
 from mne import io
 from mne.datasets import sample
+from mne.decoding import TimeDecoding
+
+print(__doc__)
 
 data_path = sample.data_path()
 
@@ -47,53 +49,14 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
 
 epochs_list = [epochs[k] for k in event_id]
 mne.epochs.equalize_epoch_counts(epochs_list)
-
-###############################################################################
-# Decoding in sensor space using a linear SVM
-n_times = len(epochs.times)
-# Take only the data channels (here the gradiometers)
 data_picks = mne.pick_types(epochs.info, meg=True, exclude='bads')
-# Make arrays X and y such that :
-# X is 3d with X.shape[0] is the total number of epochs to classify
-# y is filled with integers coding for the class to predict
-# We must have X.shape[0] equal to y.shape[0]
-X = [e.get_data()[:, data_picks, :] for e in epochs_list]
-y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
-X = np.concatenate(X)
-y = np.concatenate(y)
 
-from sklearn.svm import SVC
-from sklearn.cross_validation import cross_val_score, ShuffleSplit
-
-clf = SVC(C=1, kernel='linear')
-# Define a monte-carlo cross-validation generator (reduce variance):
-cv = ShuffleSplit(len(X), 10, test_size=0.2)
-
-scores = np.empty(n_times)
-std_scores = np.empty(n_times)
-
-for t in xrange(n_times):
-    Xt = X[:, :, t]
-    # Standardize features
-    Xt -= Xt.mean(axis=0)
-    Xt /= Xt.std(axis=0)
-    # Run cross-validation
-    # Note : for sklearn the Xt matrix should be 2d (n_samples x n_features)
-    scores_t = cross_val_score(clf, Xt, y, cv=cv, n_jobs=1)
-    scores[t] = scores_t.mean()
-    std_scores[t] = scores_t.std()
-
-times = 1e3 * epochs.times
-scores *= 100  # make it percentage
-std_scores *= 100
-plt.plot(times, scores, label="Classif. score")
-plt.axhline(50, color='k', linestyle='--', label="Chance level")
-plt.axvline(0, color='r', label='stim onset')
-plt.legend()
-hyp_limits = (scores - std_scores, scores + std_scores)
-plt.fill_between(times, hyp_limits[0], y2=hyp_limits[1], color='b', alpha=0.5)
-plt.xlabel('Times (ms)')
-plt.ylabel('CV classification score (% correct)')
-plt.ylim([30, 100])
-plt.title('Sensor space decoding')
-plt.show()
+###############################################################################
+# Setup decoding: default is linear SVC
+td = TimeDecoding(predict_mode='cross-validation', n_jobs=1)
+# Fit
+td.fit(epochs)
+# Compute accuracy
+td.score(epochs)
+# Plot scores across time
+td.plot(title='Sensor space decoding')
diff --git a/examples/decoding/plot_decoding_spatio_temporal_source.py b/examples/decoding/plot_decoding_spatio_temporal_source.py
index dddfd49..c801eb3 100644
--- a/examples/decoding/plot_decoding_spatio_temporal_source.py
+++ b/examples/decoding/plot_decoding_spatio_temporal_source.py
@@ -9,14 +9,11 @@ selection is employed to confine the classification to the potentially
 relevant features. The classifier is then trained on the selected features of
 epochs in source space.
 """
-
 # Author: Denis A. Engemann <denis.engemann at gmail.com>
 #         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 import os
 import numpy as np
@@ -24,6 +21,8 @@ from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname_fwd = data_path + 'MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
 fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
@@ -99,8 +98,8 @@ X -= X.mean(axis=0)
 X /= X.std(axis=0)
 
 # prepare classifier
-from sklearn.svm import SVC
-from sklearn.cross_validation import ShuffleSplit
+from sklearn.svm import SVC  # noqa
+from sklearn.cross_validation import ShuffleSplit  # noqa
 
 # Define a monte-carlo cross-validation generator (reduce variance):
 n_splits = 10
@@ -108,8 +107,8 @@ clf = SVC(C=1, kernel='linear')
 cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
 
 # setup feature selection and classification pipeline
-from sklearn.feature_selection import SelectKBest, f_classif
-from sklearn.pipeline import Pipeline
+from sklearn.feature_selection import SelectKBest, f_classif  # noqa
+from sklearn.pipeline import Pipeline  # noqa
 
 # we will use an ANOVA f-test to preselect relevant spatio-temporal units
 feature_selection = SelectKBest(f_classif, k=500)  # take the best 500
@@ -143,12 +142,11 @@ feature_weights -= feature_weights.mean(axis=1)[:, None]
 # unmask, take absolute values, emulate f-value scale
 feature_weights = np.abs(feature_weights.data) * 10
 
-vertices = [stc.lh_vertno, np.array([])]  # empty array for right hemisphere
+vertices = [stc.lh_vertno, np.array([], int)]  # empty array for right hemi
 stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
                               tmin=stc.tmin, tstep=stc.tstep,
                               subject='sample')
 
-brain = stc_feat.plot(subject=subject, fmin=1, fmid=5.5, fmax=20)
+brain = stc_feat.plot()
 brain.set_time(100)
-brain.show_view('l')
-# take the medial view to further explore visual areas
+brain.show_view('l')  # take the medial view to further explore visual areas
diff --git a/examples/decoding/plot_decoding_time_generalization.py b/examples/decoding/plot_decoding_time_generalization.py
index 7c61740..f9495b0 100644
--- a/examples/decoding/plot_decoding_time_generalization.py
+++ b/examples/decoding/plot_decoding_time_generalization.py
@@ -1,39 +1,36 @@
 """
-========================================================
-Decoding sensor space data with over-time generalization
-========================================================
+==========================================================
+Decoding sensor space data with Generalization Across Time
+==========================================================
 
 This example runs the analysis computed in:
 
 Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
 and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
-unexpected sounds", PLOS ONE, 2013
+unexpected sounds", PLOS ONE, 2013,
+http://www.ncbi.nlm.nih.gov/pubmed/24475052
 
 The idea is to learn at one time instant and assess if the decoder
 can predict accurately over time.
 """
-print(__doc__)
-
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# Authors: Jean-Remi King <jeanremi.king at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-import numpy as np
-import matplotlib.pyplot as plt
-
 import mne
 from mne.datasets import spm_face
-from mne.decoding import time_generalization
+from mne.decoding import GeneralizationAcrossTime
 
-data_path = spm_face.data_path()
+print(__doc__)
 
-###############################################################################
+# Preprocess data
+data_path = spm_face.data_path()
 # Load and filter data, set up epochs
-
 raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
 
 raw = mne.io.Raw(raw_fname % 1, preload=True)  # Take first run
-raw.append(mne.io.Raw(raw_fname % 2, preload=True))  # Take second run too
 
 picks = mne.pick_types(raw.info, meg=True, exclude='bads')
 raw.filter(1, 45, method='iir')
@@ -42,50 +39,17 @@ events = mne.find_events(raw, stim_channel='UPPT001')
 event_id = {"faces": 1, "scrambled": 2}
 tmin, tmax = -0.1, 0.5
 
-# Set up pick list
-picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
-                       ref_meg=False, exclude='bads')
-
-# Read epochs
 decim = 4  # decimate to make the example faster to run
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                     picks=picks, baseline=None, preload=True,
-                    reject=dict(mag=1.5e-12), decim=decim)
-
-epochs_list = [epochs[k] for k in event_id]
-mne.epochs.equalize_epoch_counts(epochs_list)
-
-###############################################################################
-# Run decoding
-
-# Compute Area Under the Curver (AUC) Receiver Operator Curve (ROC) score
-# of time generalization. A perfect decoding would lead to AUCs of 1.
-# Chance level is at 0.5.
-# The default classifier is a linear SVM (C=1) after feature scaling.
-scores = time_generalization(epochs_list, clf=None, cv=5, scoring="roc_auc",
-                             shuffle=True, n_jobs=2)
-
-###############################################################################
-# Now visualize
-times = 1e3 * epochs.times  # convert times to ms
+                    reject=dict(mag=1.5e-12), decim=decim, verbose=False)
 
-plt.figure()
-plt.imshow(scores, interpolation='nearest', origin='lower',
-           extent=[times[0], times[-1], times[0], times[-1]],
-           vmin=0.1, vmax=0.9, cmap='RdBu_r')
-plt.xlabel('Times Test (ms)')
-plt.ylabel('Times Train (ms)')
-plt.title('Time generalization (%s vs. %s)' % tuple(event_id.keys()))
-plt.axvline(0, color='k')
-plt.axhline(0, color='k')
-plt.colorbar()
+# Define the decoder. In 'cross-validation' mode, each left-out fold is
+# scored by the classifiers trained on the remaining folds.
+gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=2)
 
-plt.figure()
-plt.plot(times, np.diag(scores), label="Classif. score")
-plt.axhline(0.5, color='k', linestyle='--', label="Chance level")
-plt.axvline(0, color='r', label='stim onset')
-plt.legend()
-plt.xlabel('Time (ms)')
-plt.ylabel('ROC classification score')
-plt.title('Decoding (%s vs. %s)' % tuple(event_id.keys()))
-plt.show()
+# fit and score
+gat.fit(epochs)
+gat.score(epochs)
+gat.plot(vmin=0.1, vmax=0.9,
+         title="Generalization Across Time (faces vs. scrambled)")
+gat.plot_diagonal()  # plot decoding over time (the GAT matrix diagonal)
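+
+# A hedged numpy/scikit-learn sketch of the idea behind
+# GeneralizationAcrossTime: train at one time sample, score at every other.
+# `X` (n_epochs, n_channels, n_times) and `y` are hypothetical arrays, not
+# variables from this example, and cross-validation is omitted for brevity:
+#
+# import numpy as np
+# from sklearn.svm import SVC
+#
+# n_times = X.shape[2]
+# scores = np.zeros((n_times, n_times))
+# for t_train in range(n_times):
+#     clf = SVC(kernel='linear').fit(X[..., t_train], y)
+#     for t_test in range(n_times):
+#         scores[t_train, t_test] = clf.score(X[..., t_test], y)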
diff --git a/examples/decoding/plot_decoding_time_generalization_conditions.py b/examples/decoding/plot_decoding_time_generalization_conditions.py
new file mode 100644
index 0000000..215c8a6
--- /dev/null
+++ b/examples/decoding/plot_decoding_time_generalization_conditions.py
@@ -0,0 +1,75 @@
+"""
+=========================================================================
+Decoding sensor space data with generalization across time and conditions
+=========================================================================
+
+This example runs the analysis computed in:
+
+Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
+and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
+unexpected sounds", PLOS ONE, 2013,
+http://www.ncbi.nlm.nih.gov/pubmed/24475052
+
+King & Dehaene (2014) 'Characterizing the dynamics of mental
+representations: the temporal generalization method', Trends In Cognitive
+Sciences, 18(4), 203-210.
+http://www.ncbi.nlm.nih.gov/pubmed/24593982
+
+The idea is to learn at one time instant and assess if the decoder
+can predict accurately over time and on a second set of conditions.
+"""
+# Authors: Jean-Remi King <jeanremi.king at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+import mne
+from mne.datasets import sample
+from mne.decoding import GeneralizationAcrossTime
+
+print(__doc__)
+
+# Preprocess data
+data_path = sample.data_path()
+# Load and filter data, set up epochs
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+raw = mne.io.Raw(raw_fname, preload=True)
+picks = mne.pick_types(raw.info, meg=True, exclude='bads')  # Pick MEG channels
+raw.filter(1, 30, method='fft')  # Band pass filtering signals
+events = mne.read_events(events_fname)
+event_id = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
+decim = 2  # decimate to make the example faster to run
+epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
+                    picks=picks, baseline=None, preload=True,
+                    reject=dict(mag=5e-12), decim=decim, verbose=False)
+
+# We will train the classifier on all left visual vs auditory trials
+# and test on all right visual vs auditory trials.
+
+# Because the test data here are independent of the training data, we test
+# the classifier from each fold and average the resulting predictions.
+
+# Define events of interest
+triggers = epochs.events[:, 2]
+viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)
+
+gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)
+
+# For our left events, which ones are visual?
+viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
+# To make scikit-learn happy, we converted the bool array to integers
+# in the same line. This results in an array of zeros and ones:
+print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))
+
+gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)
+
+# For our right events, which ones are visual?
+viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)
+
+gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
+gat.plot(
+    title="Generalization Across Time (visual vs auditory): left to right")
diff --git a/examples/decoding/plot_decoding_xdawn_eeg.py b/examples/decoding/plot_decoding_xdawn_eeg.py
new file mode 100644
index 0000000..372be8f
--- /dev/null
+++ b/examples/decoding/plot_decoding_xdawn_eeg.py
@@ -0,0 +1,101 @@
+"""
+=============================
+XDAWN Decoding From EEG data
+=============================
+
+ERP decoding with Xdawn. For each event type, a set of spatial Xdawn filters
+is trained and applied to the signal. Channels are then concatenated and
+rescaled to create feature vectors that are fed to a logistic regression.
+
+References
+----------
+[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
+algorithm to enhance evoked potentials: application to brain-computer
+interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
+
+[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
+August). Theoretical analysis of xDAWN algorithm: application to an
+efficient sensor selection in a P300 BCI. In Signal Processing Conference,
+2011 19th European (pp. 1382-1386). IEEE.
+"""
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from sklearn.cross_validation import StratifiedKFold
+from sklearn.pipeline import make_pipeline
+from sklearn.linear_model import LogisticRegression
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.preprocessing import MinMaxScaler
+
+from mne import io, pick_types, read_events, Epochs
+from mne.datasets import sample
+from mne.preprocessing import Xdawn
+from mne.decoding import EpochsVectorizer
+from mne.viz import tight_layout
+
+
+print(__doc__)
+
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters and read data
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+tmin, tmax = -0.1, 0.3
+event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 20, method='iir')
+events = read_events(event_fname)
+
+picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
+                   exclude='bads')
+
+epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
+                picks=picks, baseline=None, preload=True,
+                add_eeg_ref=False, verbose=False)
+
+# Create classification pipeline
+clf = make_pipeline(Xdawn(n_components=3),
+                    EpochsVectorizer(),
+                    MinMaxScaler(),
+                    LogisticRegression(penalty='l1'))
+
+# Get the labels
+labels = epochs.events[:, -1]
+
+# Cross validator
+cv = StratifiedKFold(y=labels, n_folds=10, shuffle=True, random_state=42)
+
+# Do cross-validation
+preds = np.empty(len(labels))
+for train, test in cv:
+    clf.fit(epochs[train], labels[train])
+    preds[test] = clf.predict(epochs[test])
+
+# Classification report
+target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
+report = classification_report(labels, preds, target_names=target_names)
+print(report)
+
+# Normalized confusion matrix
+cm = confusion_matrix(labels, preds)
+cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
+
+# Plot confusion matrix
+plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
+plt.title('Normalized Confusion matrix')
+plt.colorbar()
+tick_marks = np.arange(len(target_names))
+plt.xticks(tick_marks, target_names, rotation=45)
+plt.yticks(tick_marks, target_names)
+tight_layout()
+plt.ylabel('True label')
+plt.xlabel('Predicted label')
+plt.show()
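+
+# The EpochsVectorizer step above flattens each epoch into one feature
+# vector; conceptually (a sketch with a hypothetical 3D array, not this
+# example's data):
+#
+# data = np.random.randn(10, 60, 151)    # (n_epochs, n_channels, n_times)
+# vectors = data.reshape(len(data), -1)  # (n_epochs, n_channels * n_times)
+# print(vectors.shape)                   # -> (10, 9060)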
diff --git a/examples/decoding/plot_ems_filtering.py b/examples/decoding/plot_ems_filtering.py
index b37bf0a..b7ed6e5 100644
--- a/examples/decoding/plot_ems_filtering.py
+++ b/examples/decoding/plot_ems_filtering.py
@@ -22,17 +22,19 @@ trials by condition. A second plot shows the average time series for each
 condition. Finally a topographic plot is created which exhibits the
 temporal evolution of the spatial filters.
 """
-
 # Author: Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.datasets import sample
 from mne.decoding import compute_ems
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 # Set parameters
@@ -63,8 +65,6 @@ epochs.equalize_event_counts(epochs.event_id, copy=False)
 # compute surrogate time series
 surrogates, filters, conditions = compute_ems(epochs, ['AudL', 'VisL'])
 
-import matplotlib.pyplot as plt
-
 times = epochs.times * 1e3
 plt.figure()
 plt.title('single trial surrogates')
diff --git a/examples/decoding/plot_linear_model_patterns.py b/examples/decoding/plot_linear_model_patterns.py
new file mode 100644
index 0000000..f30822c
--- /dev/null
+++ b/examples/decoding/plot_linear_model_patterns.py
@@ -0,0 +1,84 @@
+"""
+===============================================================
+Linear classifier on sensor data with plot patterns and filters
+===============================================================
+
+Decoding, a.k.a. MVPA or supervised machine learning applied to MEG and EEG
+data in sensor space. Here a linear classifier is fit with the LinearModel
+object, which provides topographical patterns that are more
+neurophysiologically interpretable [1] than the classifier filters (weight
+vectors). The patterns explain how the MEG and EEG data were generated from
+the discriminant neural sources extracted by the filters. Note that the
+patterns and filters are more similar to each other for MEG than for EEG
+data because the noise is less spatially correlated in MEG than in EEG.
+
+[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
+Blankertz, B., & Bießmann, F. (2014). On the interpretation of
+weight vectors of linear models in multivariate neuroimaging.
+NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
+"""
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Romain Trachel <trachelr at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne import io
+from mne.datasets import sample
+
+from sklearn.preprocessing import StandardScaler
+from sklearn.linear_model import LogisticRegression
+
+# import a linear classifier from mne.decoding
+from mne.decoding import LinearModel
+
+print(__doc__)
+
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+tmin, tmax = -0.2, 0.5
+event_id = dict(aud_l=1, vis_l=3)
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(2, None, method='iir')  # replace baselining with high-pass
+events = mne.read_events(event_fname)
+
+# Read epochs
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    decim=4, baseline=None, preload=True)
+
+labels = epochs.events[:, -1]
+
+# get MEG and EEG data
+meg_epochs = epochs.pick_types(meg=True, eeg=False, copy=True)
+meg_data = meg_epochs.get_data().reshape(len(labels), -1)
+eeg_epochs = epochs.pick_types(meg=False, eeg=True, copy=True)
+eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
+
+###############################################################################
+# Decoding in sensor space using a LogisticRegression classifier
+
+clf = LogisticRegression()
+sc = StandardScaler()
+
+# create a linear model with LogisticRegression
+model = LinearModel(clf)
+
+# fit the classifier on MEG data
+X = sc.fit_transform(meg_data)
+model.fit(X, labels)
+# plot patterns and filters
+model.plot_patterns(meg_epochs.info, title='MEG Patterns')
+model.plot_filters(meg_epochs.info, title='MEG Filters')
+
+# fit the classifier on EEG data
+X = sc.fit_transform(eeg_data)
+model.fit(X, labels)
+# plot patterns and filters
+model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
+model.plot_filters(eeg_epochs.info, title='EEG Filters')
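+
+# How a pattern relates to a filter, after Haufe et al. [1]; a hedged numpy
+# sketch, not the exact LinearModel internals. `X` and `w` below are
+# hypothetical stand-ins for the scaled data and the fitted weights:
+#
+# import numpy as np
+# X = np.random.randn(200, 32)   # data: samples x features
+# X -= X.mean(axis=0)            # the relation assumes zero-mean data
+# w = np.random.randn(32)        # filter (decoder weight vector)
+# s = X.dot(w)                   # estimated source time course
+# pattern = X.T.dot(s) / len(X)  # equals cov(X).dot(w) up to scaling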
diff --git a/examples/export/README.txt b/examples/export/README.txt
deleted file mode 100644
index bc9e455..0000000
--- a/examples/export/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Export of MNE data for use in other packages
---------------------------------------------
-
-Export as data frames in Pandas, TimeSeries in nitime etc.
diff --git a/examples/export/plot_epochs_to_nitime.py b/examples/export/plot_epochs_to_nitime.py
deleted file mode 100644
index 037ab13..0000000
--- a/examples/export/plot_epochs_to_nitime.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-=======================
-Export epochs to NiTime
-=======================
-
-This script shows how to export Epochs to the NiTime library
-for further signal processing and data analysis.
-
-"""
-
-# Author: Denis Engemann <denis.engemann at gmail.com>
-#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import numpy as np
-import mne
-from mne import io
-from mne.datasets import sample
-data_path = sample.data_path()
-
-###############################################################################
-# Set parameters
-raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
-event_id, tmin, tmax = 1, -0.2, 0.5
-
-# Setup for reading the raw data
-raw = io.Raw(raw_fname)
-events = mne.read_events(event_fname)
-
-# Set up pick list: EEG + MEG - bad channels (modify to your needs)
-raw.info['bads'] += ['MEG 2443', 'EEG 053']
-picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
-                       exclude='bads')
-
-# Read epochs
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
-                    picks=picks, baseline=(None, 0), preload=True,
-                    reject=dict(grad=4000e-13, eog=150e-6))
-
-# Export to NiTime
-epochs_ts = epochs.to_nitime(picks=np.arange(20), collapse=True)
-
-###############################################################################
-# Now use nitime's OO-interface to compute coherence between sensors
-
-from nitime.analysis import MTCoherenceAnalyzer
-from nitime.viz import drawmatrix_channels
-import matplotlib.pyplot as plt
-
-# setup coherency analyzer
-C = MTCoherenceAnalyzer(epochs_ts)
-
-# confine analysis to 10 - 20 Hz
-freq_idx = np.where((C.frequencies > 10) * (C.frequencies < 30))[0]
-
-# compute average coherence
-coh = np.mean(C.coherence[:, :, freq_idx], -1)  # Averaging on last dimension
-drawmatrix_channels(coh, epochs.ch_names, color_anchor=0,
-                    title='MEG gradiometer coherence')
-
-plt.show()
diff --git a/examples/export/plot_evoked_to_nitime.py b/examples/export/plot_evoked_to_nitime.py
deleted file mode 100644
index b1ad268..0000000
--- a/examples/export/plot_evoked_to_nitime.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-============================
-Export evoked data to Nitime
-============================
-
-"""
-# Author: Denis Engemann <denis.engemann at gmail.com>
-#         Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import mne
-from mne.datasets import sample
-from nitime.viz import plot_tseries
-import matplotlib.pyplot as plt
-
-
-data_path = sample.data_path()
-
-fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
-
-# Reading
-evoked = mne.read_evokeds(fname, condition=0, baseline=(None, 0), proj=True)
-
-# Pick channels to view
-picks = mne.pick_types(evoked.info, meg='grad', eeg=False, exclude='bads')
-
-evoked_ts = evoked.to_nitime(picks=picks)
-
-plot_tseries(evoked_ts)
-
-plt.show()
diff --git a/examples/export/plot_raw_to_nitime.py b/examples/export/plot_raw_to_nitime.py
deleted file mode 100644
index 3f9be8c..0000000
--- a/examples/export/plot_raw_to_nitime.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-============================
-Export Raw Objects to NiTime
-============================
-
-This script shows how to export raw files to the NiTime library
-for further signal processing and data analysis.
-
-"""
-
-# Author: Denis Engemann <denis.engemann at gmail.com>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import numpy as np
-import mne
-
-from mne.io import Raw
-from mne.datasets import sample
-
-data_path = sample.data_path()
-raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-
-###############################################################################
-# get raw data
-raw = Raw(raw_fname)
-
-# set picks
-picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                       stim=False, exclude='bads')
-
-# pick times relative to the onset of the MEG measurement.
-start, stop = raw.time_as_index([100, 115], use_first_samp=False)
-
-# export to nitime using a copy of the data
-raw_ts = raw.to_nitime(start=start, stop=stop, picks=picks, copy=True)
-
-###############################################################################
-# explore some nitime timeseries features
-
-# get start
-print(raw_ts.t0)
-
-# get duration
-print(raw_ts.duration)
-
-# get sample duration (sampling interval)
-print(raw_ts.sampling_interval)
-
-# get exported raw infor
-print(list(raw_ts.metadata.keys()))
-
-# index at certain time
-print(raw_ts.at(110.5))
-
-# get channel names (attribute added during export)
-print(raw_ts.ch_names[:3])
-
-###############################################################################
-# investigate spectral density
-
-import matplotlib.pyplot as plt
-
-import nitime.algorithms as tsa
-
-ch_sel = raw_ts.ch_names.index('MEG 0122')
-
-data_ch = raw_ts.data[ch_sel]
-
-f, psd_mt, nu = tsa.multi_taper_psd(data_ch, Fs=raw_ts.sampling_rate,
-                                    BW=1, adaptive=False, jackknife=False)
-
-# Convert PSD to dB
-psd_mt = 10 * np.log10(psd_mt)
-
-plt.close('all')
-plt.plot(f, psd_mt)
-plt.xlabel('Frequency (Hz)')
-plt.ylabel('Power Spectrald Density (db/Hz)')
-plt.title('Multitaper Power Spectrum \n %s' % raw_ts.ch_names[ch_sel])
-plt.show()
diff --git a/examples/forward/README.txt b/examples/forward/README.txt
new file mode 100644
index 0000000..89db558
--- /dev/null
+++ b/examples/forward/README.txt
@@ -0,0 +1,6 @@
+
+Forward modeling
+----------------
+
+From BEM segmentation, coregistration, setting up source spaces
+to actual computation of forward solution.
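+
+A typical call sequence (a schematic sketch; the variable names are
+placeholders, not actual files)::
+
+    src = mne.setup_source_space(subject, subjects_dir=subjects_dir)
+    fwd = mne.make_forward_solution(info, trans, src, bem)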
diff --git a/examples/plot_bem_contour_mri.py b/examples/forward/plot_bem_contour_mri.py
similarity index 98%
rename from examples/plot_bem_contour_mri.py
rename to examples/forward/plot_bem_contour_mri.py
index fd5e57f..234a08c 100644
--- a/examples/plot_bem_contour_mri.py
+++ b/examples/forward/plot_bem_contour_mri.py
@@ -17,6 +17,8 @@ BEM segmentations which are required for computing the forward solution.
 from mne.viz import plot_bem
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 
diff --git a/examples/plot_coregistration_transform.py b/examples/forward/plot_coregistration_transform.py
similarity index 91%
rename from examples/plot_coregistration_transform.py
rename to examples/forward/plot_coregistration_transform.py
index 2ad05c1..e6e10b5 100644
--- a/examples/plot_coregistration_transform.py
+++ b/examples/forward/plot_coregistration_transform.py
@@ -17,6 +17,8 @@ from mne import read_evokeds
 from mne.datasets import sample
 from mne.viz import plot_trans
 
+print(__doc__)
+
 
 data_path = sample.data_path()
 
@@ -27,5 +29,5 @@ trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
 
 condition = 'Left Auditory'
 evoked = read_evokeds(evoked_fname, condition=condition, baseline=(-0.2, 0.0))
-plot_trans(evoked.info, trans_fname=trans_fname, subject='sample',
+plot_trans(evoked.info, trans_fname, subject='sample', dig=True,
            subjects_dir=subjects_dir)
diff --git a/examples/plot_decimate_head_surface.py b/examples/forward/plot_decimate_head_surface.py
similarity index 52%
rename from examples/plot_decimate_head_surface.py
rename to examples/forward/plot_decimate_head_surface.py
index 636985a..90a0cdf 100644
--- a/examples/plot_decimate_head_surface.py
+++ b/examples/forward/plot_decimate_head_surface.py
@@ -8,32 +8,29 @@ using a cloud of digitization points for coordinate alignment
 instead of e.g. EEG-cap positions.
 
 """
-print(__doc__)
-
 # Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
 import mne
-from mne.surface import decimate_surface
+from mne.surface import decimate_surface  # noqa
+
+print(__doc__)
 
 path = mne.datasets.sample.data_path()
 surf = mne.read_bem_surfaces(path + '/subjects/sample/bem/sample-head.fif')[0]
 
 points, triangles = surf['rr'], surf['tris']
 
-# reduce to 30000 meshes equaling ${SUBJECT}-head-medium.fif output from
-# mne_make_scalp_surfaces.py and mne_make_scalp_surfaces
-points_dec, triangles_dec = decimate_surface(points, triangles,
-                                             n_triangles=30000)
+# # reduce to 30000 meshes equaling ${SUBJECT}-head-medium.fif output from
+# # mne_make_scalp_surfaces.py and mne_make_scalp_surfaces
+# points_dec, triangles_dec = decimate_surface(points, triangles,
+#                                              n_triangles=30000)
 
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
+# from mayavi import mlab  # noqa
 
-head_col = (0.95, 0.83, 0.83)  # light pink
+# head_col = (0.95, 0.83, 0.83)  # light pink
 
-p, t = points_dec, triangles_dec
-mlab.triangular_mesh(p[:, 0], p[:, 1], p[:, 2], t, color=head_col)
+# p, t = points_dec, triangles_dec
+# mlab.triangular_mesh(p[:, 0], p[:, 1], p[:, 2], t, color=head_col)
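+
+# A sketch of the decimation step itself (note that decimate_surface may
+# require the TVTK/VTK stack to be installed, which is presumably why the
+# code above is commented out):
+#
+# points_dec, triangles_dec = decimate_surface(points, triangles,
+#                                              n_triangles=30000)
+# print('Triangles before: %d, after: %d'
+#       % (len(triangles), len(triangles_dec)))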
diff --git a/examples/forward/plot_left_cerebellum_volume_source.py b/examples/forward/plot_left_cerebellum_volume_source.py
new file mode 100644
index 0000000..6a8b3c3
--- /dev/null
+++ b/examples/forward/plot_left_cerebellum_volume_source.py
@@ -0,0 +1,96 @@
+"""
+==============================================
+Generate a left cerebellum volume source space
+==============================================
+
+Generate a volume source space of the left cerebellum and plot its vertices
+relative to the left cortical surface source space and the freesurfer
+segmentation file.
+
+"""
+
+# Author: Alan Leggitt <alan.leggitt at ucsf.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.spatial import ConvexHull
+from mayavi import mlab
+from mne import setup_source_space, setup_volume_source_space
+from mne.datasets import sample
+
+print(__doc__)
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+subj = 'sample'
+aseg_fname = subjects_dir + '/sample/mri/aseg.mgz'
+
+###############################################################################
+# Setup the source spaces
+
+# setup a cortical surface source space and extract left hemisphere
+surf = setup_source_space(subj, subjects_dir=subjects_dir,
+                          add_dist=False, overwrite=True)
+lh_surf = surf[0]
+
+# setup a volume source space of the left cerebellum cortex
+volume_label = 'Left-Cerebellum-Cortex'
+sphere = (0, 0, 0, 120)
+lh_cereb = setup_volume_source_space(subj, mri=aseg_fname, sphere=sphere,
+                                     volume_label=volume_label,
+                                     subjects_dir=subjects_dir)
+
+###############################################################################
+# Plot the positions of each source space
+
+# extract left cortical surface vertices, triangle faces, and surface normals
+x1, y1, z1 = lh_surf['rr'].T
+faces = lh_surf['use_tris']
+normals = lh_surf['nn']
+# normalize for mayavi
+normals /= np.sum(normals * normals, axis=1)[:, np.newaxis]
+
+# extract left cerebellum cortex source positions
+x2, y2, z2 = lh_cereb[0]['rr'][lh_cereb[0]['inuse'].astype(bool)].T
+
+# open a 3d figure in mayavi
+mlab.figure(1, bgcolor=(0, 0, 0))
+
+# plot the left cortical surface
+mesh = mlab.pipeline.triangular_mesh_source(x1, y1, z1, faces)
+mesh.data.point_data.normals = normals
+mlab.pipeline.surface(mesh, color=3 * (0.7,))
+
+# plot the convex hull bounding the left cerebellum
+hull = ConvexHull(np.c_[x2, y2, z2])
+mlab.triangular_mesh(x2, y2, z2, hull.simplices, color=3 * (0.5,), opacity=0.3)
+
+# plot the left cerebellum sources
+mlab.points3d(x2, y2, z2, color=(1, 1, 0), scale_factor=0.001)
+
+# adjust view parameters
+mlab.view(173.78, 101.75, 0.30, np.array([-0.03, -0.01,  0.03]))
+mlab.roll(85)
+
+##############################################################################
+# Compare volume source locations to segmentation file in freeview
+
+# Export source positions to a nifti file
+nii_fname = data_path + '/MEG/sample/mne_sample_lh-cerebellum-cortex.nii'
+
+# Combine the source spaces
+src = surf + lh_cereb
+
+src.export_volume(nii_fname, mri_resolution=True)
+
+# Uncomment the following lines to display source positions in freeview.
+'''
+# display image in freeview
+from mne.utils import run_subprocess
+mri_fname = subjects_dir + '/sample/mri/brain.mgz'
+run_subprocess(['freeview', '-v', mri_fname, '-v',
+                '%s:colormap=lut:opacity=0.5' % aseg_fname, '-v',
+                '%s:colormap=jet:colorscale=0,2' % nii_fname, '-slice',
+                '157 75 105'])
+'''
diff --git a/examples/plot_make_forward.py b/examples/forward/plot_make_forward.py
similarity index 79%
copy from examples/plot_make_forward.py
copy to examples/forward/plot_make_forward.py
index 69af985..473d94a 100644
--- a/examples/plot_make_forward.py
+++ b/examples/forward/plot_make_forward.py
@@ -2,24 +2,31 @@
 ======================================================
 Create a forward operator and display sensitivity maps
 ======================================================
+
+Sensitivity maps, which indicate how well different sensor types can detect
+neural currents from different regions of the brain, can be produced from
+forward operators.
 """
 # Author: Eric Larson <larson.eric.d at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
+import matplotlib.pyplot as plt
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-mri = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
+trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
 src = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
 bem = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
 subjects_dir = data_path + '/subjects'
 
-fwd = mne.make_forward_solution(raw_fname, mri=mri, src=src, bem=bem,
+# Note that forward solutions can also be read with read_forward_solution
+fwd = mne.make_forward_solution(raw_fname, trans, src, bem,
                                 fname=None, meg=True, eeg=True, mindist=5.0,
                                 n_jobs=2, overwrite=True)
 
@@ -36,14 +43,14 @@ eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
 ###############################################################################
 # Show gain matrix a.k.a. leadfield matrix with sensitivity map
 
-import matplotlib.pyplot as plt
 picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
 picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
 
 fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
 fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
 for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
-    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto')
+    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
+                   cmap='RdBu_r')
     ax.set_title(ch_type.upper())
     ax.set_xlabel('sources')
     ax.set_ylabel('sensors')
@@ -60,6 +67,5 @@ plt.xlabel('sensitivity')
 plt.ylabel('count')
 plt.show()
 
-args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7)
-grad_map.plot(subject='sample', time_label='Gradiometer sensitivity',
-              subjects_dir=subjects_dir, **args)
+grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
+              clim=dict(lims=[0, 50, 100]))
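+
+# Roughly, the fixed-orientation sensitivity of a source is the norm of its
+# leadfield column over the picked sensors, rescaled to a maximum of one; a
+# hedged numpy sketch, not the exact mne.sensitivity_map implementation:
+#
+# import numpy as np
+# gain = leadfield[picks_meg]          # sensors x sources
+# sens = np.linalg.norm(gain, axis=0)  # one value per source
+# sens /= sens.max()                   # scale to [0, 1]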
diff --git a/examples/plot_read_bem_surfaces.py b/examples/forward/plot_read_bem_surfaces.py
similarity index 87%
rename from examples/plot_read_bem_surfaces.py
rename to examples/forward/plot_read_bem_surfaces.py
index 2d1f177..46852ac 100644
--- a/examples/plot_read_bem_surfaces.py
+++ b/examples/forward/plot_read_bem_surfaces.py
@@ -2,20 +2,22 @@
 ============================================
 Reading BEM surfaces from a forward solution
 ============================================
+
+Plot BEM surfaces used for forward solution generation.
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
 
-surfaces = mne.read_bem_surfaces(fname, add_geom=True)
+surfaces = mne.read_bem_surfaces(fname, patch_stats=True)
 
 print("Number of surfaces : %d" % len(surfaces))
 
@@ -27,10 +29,7 @@ brain_col = (0.67, 0.89, 0.91)  # light blue
 colors = [head_col, skull_col, brain_col]
 
 # 3D source space
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
+from mayavi import mlab  # noqa
 
 mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
 for c, surf in zip(colors, surfaces):
diff --git a/examples/plot_make_forward.py b/examples/forward/plot_read_forward.py
similarity index 59%
rename from examples/plot_make_forward.py
rename to examples/forward/plot_read_forward.py
index 69af985..ddf9157 100644
--- a/examples/plot_make_forward.py
+++ b/examples/forward/plot_read_forward.py
@@ -1,65 +1,67 @@
 """
-======================================================
-Create a forward operator and display sensitivity maps
-======================================================
+====================================================
+Read a forward operator and display sensitivity maps
+====================================================
+
+Forward solutions can be read using read_forward_solution in Python.
 """
-# Author: Eric Larson <larson.eric.d at gmail.com>
+# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
+import matplotlib.pyplot as plt
+
+print(__doc__)
+
 data_path = sample.data_path()
 
-raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-mri = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
-src = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
-bem = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
+fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 subjects_dir = data_path + '/subjects'
 
-fwd = mne.make_forward_solution(raw_fname, mri=mri, src=src, bem=bem,
-                                fname=None, meg=True, eeg=True, mindist=5.0,
-                                n_jobs=2, overwrite=True)
-
-# convert to surface orientation for better visualization
-fwd = mne.convert_forward_solution(fwd, surf_ori=True)
+fwd = mne.read_forward_solution(fname, surf_ori=True)
 leadfield = fwd['sol']['data']
 
 print("Leadfield size : %d x %d" % leadfield.shape)
 
-grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
-mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
-eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
-
 ###############################################################################
 # Show gain matrix a.k.a. leadfield matrix with sensitivity map
 
-import matplotlib.pyplot as plt
 picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
 picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
 
 fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
 fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
+
 for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
-    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto')
+    im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
+                   cmap='RdBu_r')
     ax.set_title(ch_type.upper())
     ax.set_xlabel('sources')
     ax.set_ylabel('sensors')
     plt.colorbar(im, ax=ax, cmap='RdBu_r')
-plt.show()
+
+###############################################################################
+# Show sensitivity of each sensor type to dipoles in the source space
+
+grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
+mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
+eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
 
 plt.figure()
 plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
          bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
          color=['c', 'b', 'k'])
-plt.legend()
 plt.title('Normal orientation sensitivity')
 plt.xlabel('sensitivity')
 plt.ylabel('count')
-plt.show()
+plt.legend()
+
+# Cautious smoothing to see actual dipoles
+grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
+              clim=dict(lims=[0, 50, 100]))
 
-args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7)
-grad_map.plot(subject='sample', time_label='Gradiometer sensitivity',
-              subjects_dir=subjects_dir, **args)
+# Note: the forward operator was computed with a minimum source-to-skull
+# distance (mindist) and therefore discards the most superficial dipoles.
+# This is why parts of the gyri are not covered.
diff --git a/examples/forward/plot_source_space_morphing.py b/examples/forward/plot_source_space_morphing.py
new file mode 100644
index 0000000..c07ac0d
--- /dev/null
+++ b/examples/forward/plot_source_space_morphing.py
@@ -0,0 +1,68 @@
+"""
+=========================
+Use source space morphing
+=========================
+
+This example shows how to use source space morphing (as opposed to
+SourceEstimate morphing) to create data that can be compared between
+subjects.
+
+.. warning:: Source space morphing will likely lead to source spaces that are
+             less evenly sampled than source spaces created for individual
+             subjects. Use with caution and check effects on localization
+             before use.
+"""
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import mne
+
+data_path = mne.datasets.sample.data_path()
+subjects_dir = op.join(data_path, 'subjects')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_raw-trans.fif')
+fname_bem = op.join(subjects_dir, 'sample', 'bem',
+                    'sample-5120-bem-sol.fif')
+fname_src_fs = op.join(subjects_dir, 'fsaverage', 'bem',
+                       'fsaverage-ico-5-src.fif')
+raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
+
+# Get relevant channel information
+info = mne.io.read_info(raw_fname)
+info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False,
+                                          exclude=[]))
+
+# Morph fsaverage's source space to sample
+src_fs = mne.read_source_spaces(fname_src_fs)
+src_morph = mne.morph_source_spaces(src_fs, subject_to='sample',
+                                    subjects_dir=subjects_dir)
+
+# Compute the forward with our morphed source space
+fwd = mne.make_forward_solution(info, trans=fname_trans,
+                                src=src_morph, bem=fname_bem)
+# fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True)
+mag_map = mne.sensitivity_map(fwd, ch_type='mag')
+
+# Return this SourceEstimate (on sample's surfaces) to fsaverage's surfaces
+mag_map_fs = mag_map.to_original_src(src_fs, subjects_dir=subjects_dir)
+
+# Plot the result, which tracks the sulcal-gyral folding. Since outliers may
+# occur, we place the upper color limit at the 99th percentile.
+kwargs = dict(clim=dict(kind='percent', lims=[0, 50, 99]),
+              # no smoothing, let's see the dipoles on the cortex.
+              smoothing_steps=1, hemi='rh', views=['lat'])
+
+# Now note that the dipoles on fsaverage are almost equidistant while
+# morphing will distribute the dipoles unevenly across the given subject's
+# cortical surface to achieve the closest approximation to the average brain.
+# Our testing code suggests a correlation higher than 0.99.
+
+brain_subject = mag_map.plot(  # plot forward in subject source space (morphed)
+    time_label=None, subjects_dir=subjects_dir, **kwargs)
+
+brain_fs = mag_map_fs.plot(  # plot forward in original source space (remapped)
+    time_label=None, subjects_dir=subjects_dir, **kwargs)
diff --git a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
index 30de479..aa924bf 100644
--- a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
+++ b/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py
@@ -7,21 +7,21 @@ Compute dSPM inverse solution on single trial epochs restricted
 to a brain label.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
 from mne.minimum_norm import apply_inverse
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
 fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
@@ -51,7 +51,7 @@ raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick MEG channels
 picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   include=include, exclude='bads')
+                       include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
diff --git a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
index ba69df1..5c36f17 100644
--- a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
+++ b/examples/inverse/plot_compute_mne_inverse_raw_in_label.py
@@ -8,19 +8,18 @@ to a brain label and stores the solution in stc files for
 visualisation.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
 
+print(__doc__)
 
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
diff --git a/examples/inverse/plot_compute_mne_inverse_volume.py b/examples/inverse/plot_compute_mne_inverse_volume.py
index 253bd12..c1faff0 100644
--- a/examples/inverse/plot_compute_mne_inverse_volume.py
+++ b/examples/inverse/plot_compute_mne_inverse_volume.py
@@ -7,19 +7,21 @@ Compute dSPM inverse solution on MNE evoked dataset in a volume source
 space and store the solution in a nifti file for visualisation.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
-import numpy as np
 import matplotlib.pyplot as plt
+
+from nilearn.plotting import plot_stat_map
+from nilearn.image import index_img
+
 from mne.datasets import sample
 from mne import read_evokeds
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
 fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
@@ -42,18 +44,11 @@ img = stc.as_volume(src,
                     mri_resolution=False)  # set True for full MRI resolution
 
 # Save it as a nifti file
-import nibabel as nib
-nib.save(img, 'mne_%s_inverse.nii.gz' % method)
-
-data = img.get_data()
-
-# Plot result (one slice)
-coronal_slice = data[:, 10, :, 60]
-plt.close('all')
-plt.imshow(np.ma.masked_less(coronal_slice, 8), cmap=plt.cm.Reds,
-           interpolation='nearest')
-plt.colorbar()
-plt.contour(coronal_slice != 0, 1, colors=['black'])
-plt.xticks([])
-plt.yticks([])
+# nib.save(img, 'mne_%s_inverse.nii.gz' % method)
+
+t1_fname = data_path + '/subjects/sample/mri/T1.mgz'
+
+# Plotting with nilearn ######################################################
+plot_stat_map(index_img(img, 61), t1_fname, threshold=8.,
+              title='%s (t=%.1f s.)' % (method, stc.times[61]))
 plt.show()
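
For reference, a minimal standalone sketch of what nilearn's index_img does
with a 4D image such as the one returned by stc.as_volume (illustrative only;
a synthetic image stands in for the sample data):

    import numpy as np
    import nibabel as nib
    from nilearn.image import index_img

    # Build a tiny synthetic 4D image (x, y, z, time)
    rng = np.random.RandomState(0)
    img4d = nib.Nifti1Image(rng.rand(4, 4, 4, 10), affine=np.eye(4))

    vol = index_img(img4d, 5)  # extract the 6th 3D volume along the time axis
    print(vol.shape)           # -> (4, 4, 4)
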
diff --git a/examples/inverse/plot_covariance_whitening_dspm.py b/examples/inverse/plot_covariance_whitening_dspm.py
new file mode 100644
index 0000000..dea93dc
--- /dev/null
+++ b/examples/inverse/plot_covariance_whitening_dspm.py
@@ -0,0 +1,160 @@
+# doc:slow-example
+"""
+===================================================
+Demonstrate impact of whitening on source estimates
+===================================================
+
+This example demonstrates the relationship between the noise covariance
+estimate and the MNE / dSPM source amplitudes. It computes source estimates for
+the SPM faces data and compares proper regularization with insufficient
+regularization based on the methods described in [1]. The example demonstrates
+that improper regularization can lead to overestimation of source amplitudes.
+This example makes use of the previous, non-optimized code path that was used
+before implementing the suggestions presented in [1]. Please do not copy the
+patterns presented here for your own analysis; this example is purely
+illustrative.
+
+Note that this example does quite a bit of processing, so even on a
+fast machine it can take a couple of minutes to complete.
+
+References
+----------
+[1] Engemann D. and Gramfort A. (2015). Automated model selection in
+    covariance estimation and spatial whitening of MEG and EEG signals.
+    NeuroImage, vol. 108, 328-342.
+"""
+# Author: Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+
+import numpy as np
+from scipy.misc import imread
+import matplotlib.pyplot as plt
+
+import mne
+from mne import io
+from mne.datasets import spm_face
+from mne.minimum_norm import apply_inverse, make_inverse_operator
+from mne.cov import compute_covariance
+
+print(__doc__)
+
+##############################################################################
+# Get data
+
+data_path = spm_face.data_path()
+subjects_dir = data_path + '/subjects'
+
+raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
+
+raw = io.Raw(raw_fname % 1, preload=True)  # Take first run
+
+picks = mne.pick_types(raw.info, meg=True, exclude='bads')
+raw.filter(1, 30, method='iir', n_jobs=1)
+
+events = mne.find_events(raw, stim_channel='UPPT001')
+
+event_ids = {"faces": 1, "scrambled": 2}
+tmin, tmax = -0.2, 0.5
+baseline = None  # no baseline as high-pass is applied
+reject = dict(mag=3e-12)
+
+# Make source space
+
+trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
+src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
+                             overwrite=True, add_dist=False)
+bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
+forward = mne.make_forward_solution(raw.info, trans, src, bem)
+forward = mne.convert_forward_solution(forward, surf_ori=True)
+
+# inverse parameters
+conditions = 'faces', 'scrambled'
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = 'dSPM'
+clim = dict(kind='value', lims=[0, 2.5, 5])
+
+###############################################################################
+# Estimate covariance and show resulting source estimates
+
+method = 'empirical', 'shrunk'
+best_colors = 'steelblue', 'red'
+samples_epochs = 5, 15,
+fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 6))
+
+
+def brain_to_mpl(brain):
+    """convert image to be usable with matplotlib"""
+    tmp_path = op.abspath(op.join(op.curdir, 'my_tmp'))
+    brain.save_imageset(tmp_path, views=['ven'])
+    im = imread(tmp_path + '_ven.png')
+    os.remove(tmp_path + '_ven.png')
+    return im
+
+for n_train, (ax_stc_worst, ax_dynamics, ax_stc_best) in zip(samples_epochs,
+                                                             (axes1, axes2)):
+    # estimate covs based on a subset of samples
+    # make sure we have the same number of conditions.
+    events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
+                              for id_ in [event_ids[k] for k in conditions]])
+    epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
+                              baseline=baseline, preload=True, reject=reject)
+    epochs_train.equalize_event_counts(event_ids, copy=False)
+
+    noise_covs = compute_covariance(epochs_train, method=method,
+                                    tmin=None, tmax=0,  # baseline only
+                                    return_estimators=True)  # returns list
+    # prepare contrast
+    evokeds = [epochs_train[k].average() for k in conditions]
+
+    # compute stc based on worst and best
+    for est, ax, kind, color in zip(noise_covs, (ax_stc_worst, ax_stc_best),
+                                    ['best', 'worst'], best_colors):
+        # We bypass the empirical rank estimation (introduced in response
+        # to the findings in [1]) so as to use the naive code path that
+        # triggered the behavior described in [1]. The expected true rank
+        # is 274 for this dataset. Please do not do this with your data;
+        # rely on the default rank estimator, which helps regularize the
+        # covariance.
+        inverse_operator = make_inverse_operator(epochs_train.info, forward,
+                                                 est, loose=0.2, depth=0.8,
+                                                 rank=274)
+        stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
+                                      pick_ori=None) for e in evokeds)
+        stc = stc_a - stc_b
+        brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim)
+        brain.set_time(175)
+
+        im = brain_to_mpl(brain)
+        brain.close()
+        ax.axis('off')
+        ax.get_xaxis().set_visible(False)
+        ax.get_yaxis().set_visible(False)
+        ax.imshow(im)
+        ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
+
+        # plot spatial mean
+        stc_mean = stc.data.mean(0)
+        ax_dynamics.plot(stc.times * 1e3, stc_mean,
+                         label='{0} ({1})'.format(est['method'], kind),
+                         color=color)
+        # plot spatial std
+        stc_var = stc.data.std(0)
+        ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
+                                 stc_mean + stc_var, alpha=0.2, color=color)
+
+    # signal dynamics worst and best
+    ax_dynamics.set_title('{0} epochs'.format(n_train * 2))
+    ax_dynamics.set_xlabel('Time (ms)')
+    ax_dynamics.set_ylabel('Source Activation (dSPM)')
+    ax_dynamics.set_xlim(tmin * 1e3, tmax * 1e3)
+    ax_dynamics.set_ylim(-3, 3)
+    ax_dynamics.legend(loc='upper left', fontsize=10)
+
+fig.subplots_adjust(hspace=0.4, left=0.03, right=0.98, wspace=0.07)
+fig.canvas.draw()
+fig.show()
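
As a rough, self-contained illustration of the compute_covariance call used
above (synthetic epochs stand in for the SPM faces data; the 'shrunk'
estimator requires scikit-learn):

    import numpy as np
    import mne

    # Simulate a few magnetometer epochs so the snippet runs without any data
    rng = np.random.RandomState(42)
    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], sfreq=100.,
                           ch_types='mag')
    events = np.column_stack([np.arange(0, 1000, 50),
                              np.zeros(20, int), np.ones(20, int)])
    epochs = mne.EpochsArray(rng.randn(20, 3, 50), info, events, tmin=-0.25)

    # Request several estimators at once; with return_estimators=True a list
    # comes back (the example above treats the first entry as the best model)
    covs = mne.compute_covariance(epochs, method=['empirical', 'shrunk'],
                                  return_estimators=True)
    for cov in covs:
        print(cov['method'])
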
diff --git a/examples/inverse/plot_dics_beamformer.py b/examples/inverse/plot_dics_beamformer.py
index 8920603..c904661 100644
--- a/examples/inverse/plot_dics_beamformer.py
+++ b/examples/inverse/plot_dics_beamformer.py
@@ -11,13 +11,10 @@ The original reference for DICS is:
 Gross et al. Dynamic imaging of coherent sources: Studying neural interactions
 in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
 """
-
 # Author: Roman Goj <roman.goj at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 
 import matplotlib.pyplot as plt
@@ -28,6 +25,8 @@ from mne.datasets import sample
 from mne.time_frequency import compute_epochs_csd
 from mne.beamformer import dics
 
+print(__doc__)
+
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
@@ -78,12 +77,10 @@ plt.ylabel('DICS value')
 plt.title('DICS time course of the 30 largest sources.')
 plt.show()
 
-# Plot brain in 3D with PySurfer if available. Note that the subject name
-# is already known by the SourceEstimate stc object.
-brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
+# Plot brain in 3D with PySurfer if available
+brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
 brain.set_data_time_index(180)
-brain.scale_data_colormap(fmin=4, fmid=6, fmax=8, transparent=True)
 brain.show_view('lateral')
 
 # Uncomment to save image
-#brain.save_image('DICS_map.png')
+# brain.save_image('DICS_map.png')
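
A self-contained sketch of the compute_epochs_csd step that feeds dics above
(synthetic epochs; argument names as used elsewhere in this patch):

    import numpy as np
    import mne
    from mne.time_frequency import compute_epochs_csd

    rng = np.random.RandomState(0)
    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 256., 'mag')
    events = np.column_stack([np.arange(0, 2560, 256),
                              np.zeros(10, int), np.ones(10, int)])
    epochs = mne.EpochsArray(rng.randn(10, 3, 256), info, events, tmin=0.)

    # Cross-spectral density in the alpha band, summed over frequencies
    csd = compute_epochs_csd(epochs, mode='multitaper', fmin=8., fmax=12.)
    print(csd.data.shape)  # (3, 3) matrix, one row/column per channel
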
diff --git a/examples/inverse/plot_dics_source_power.py b/examples/inverse/plot_dics_source_power.py
index 7eeca11..957af3d 100644
--- a/examples/inverse/plot_dics_source_power.py
+++ b/examples/inverse/plot_dics_source_power.py
@@ -10,21 +10,19 @@ The original reference for DICS is:
 Gross et al. Dynamic imaging of coherent sources: Studying neural interactions
 in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
 """
-
 # Author: Roman Goj <roman.goj at gmail.com>
 #         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
-
 from mne.io import Raw
 from mne.datasets import sample
 from mne.time_frequency import compute_epochs_csd
 from mne.beamformer import dics_source_power
 
+print(__doc__)
+
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
@@ -64,16 +62,12 @@ noise_csds = compute_epochs_csd(epochs, mode='multitaper', tmin=-0.11,
 # Compute DICS spatial filter and estimate source power
 stc = dics_source_power(epochs.info, forward, noise_csds, data_csds)
 
-from scipy.stats import scoreatpercentile  # for thresholding
-
+clim = dict(kind='value', lims=[1.6, 1.9, 2.2])
 for i, csd in enumerate(data_csds):
     message = 'DICS source power at %0.1f Hz' % csd.frequencies[0]
     brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
-                     time_label=message, figure=i)
-    fmin, fmax = [scoreatpercentile(stc.data[:, i], ii) for ii in [95, 100]]
-    fmid = fmin + (fmax - fmin) / 2
+                     time_label=message, figure=i, clim=clim)
     brain.set_data_time_index(i)
-    brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax, transparent=True)
     brain.show_view('lateral')
     # Uncomment line below to save images
-    #brain.save_image('DICS_source_power_freq_%d.png' % csd.frequencies[0])
+    # brain.save_image('DICS_source_power_freq_%d.png' % csd.frequencies[0])
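
The explicit clim dict above replaces the old percentile-based colormap
scaling; if percentile behaviour is still wanted, the three control points
can be computed by hand (a sketch with made-up data):

    import numpy as np

    data = np.abs(np.random.RandomState(0).randn(1000))  # stand-in, stc-like
    # lower / middle / upper control points, e.g. from percentiles as the
    # removed scoreatpercentile-based code did
    lims = np.percentile(data, [95, 97.5, 100])
    clim = dict(kind='value', lims=list(lims))
    print(clim)
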
diff --git a/examples/inverse/plot_dipole_fit.py b/examples/inverse/plot_dipole_fit.py
new file mode 100644
index 0000000..08eac64
--- /dev/null
+++ b/examples/inverse/plot_dipole_fit.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""
+===============
+Do a dipole fit
+===============
+
+This shows how to fit a dipole using mne-python.
+
+For a comparison of fits between MNE-C and mne-python, see:
+
+    https://gist.github.com/Eric89GXL/ca55f791200fe1dc3dd2
+
+"""
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+from os import path as op
+
+import mne
+
+print(__doc__)
+
+data_path = mne.datasets.sample.data_path()
+subjects_dir = op.join(data_path, 'subjects')
+fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_raw-trans.fif')
+fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
+
+# Let's localize the N100m (using MEG only)
+evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
+                          baseline=(None, 0))
+evoked.pick_types(meg=True, eeg=False)
+evoked.crop(0.07, 0.08)
+
+# Fit a dipole
+dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
+
+# Plot the result
+dip.plot_locations(fname_trans, 'sample', subjects_dir)
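
For orientation, the Dipole container returned by mne.fit_dipole bundles the
fitted parameters as arrays. A toy instance with made-up values (assuming the
mne.Dipole constructor of this release):

    import numpy as np
    import mne

    times = np.array([0.07, 0.075, 0.08])         # fit times (s)
    pos = np.zeros((3, 3))                        # positions (m)
    amplitude = np.array([20e-9, 40e-9, 30e-9])   # amplitudes (Am)
    ori = np.tile([0., 0., 1.], (3, 1))           # unit orientations
    gof = np.array([80., 95., 90.])               # goodness of fit (%)
    dip = mne.Dipole(times, pos, amplitude, ori, gof)
    print(dip.times, dip.amplitude, dip.gof)
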
diff --git a/examples/inverse/plot_dipole_fit_result.py b/examples/inverse/plot_dipole_fit_result.py
deleted file mode 100644
index ab6582c..0000000
--- a/examples/inverse/plot_dipole_fit_result.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-==============================================================
-Reading a .dip file form xfit and view with source space in 3D
-==============================================================
-
-Here the .dip file was generated with the mne_dipole_fit command.
-
-Detailed unix command is :
-
-$mne_dipole_fit --meas sample_audvis-ave.fif --set 1 --meg --tmin 40 --tmax 95 \
-    --bmin -200 --bmax 0 --noise sample_audvis-cov.fif \
-    --bem ../../subjects/sample/bem/sample-5120-bem-sol.fif \
-    --origin 0:0:40 --mri sample_audvis-meg-oct-6-fwd.fif \
-    --dip sample_audvis_set1.dip
-
-"""
-# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import numpy as np
-import mne
-from mne.datasets import sample
-
-data_path = sample.data_path()
-fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
-dip_fname = data_path + '/MEG/sample/sample_audvis_set1.dip'
-bem_fname = data_path + '/subjects/sample/bem/sample-5120-bem-sol.fif'
-
-brain_surface = mne.read_bem_surfaces(bem_fname, add_geom=True)[0]
-points = brain_surface['rr']
-faces = brain_surface['tris']
-
-fwd = mne.read_forward_solution(fwd_fname)
-src = fwd['src']
-
-# read dipoles
-time, pos, amplitude, ori, gof = mne.read_dip(dip_fname)
-
-print("Time (ms): %s" % time)
-print("Amplitude (nAm): %s" % amplitude)
-print("GOF (%%): %s" % gof)
-
-# only plot those for which GOF is above 50%
-pos = pos[gof > 50.]
-ori = ori[gof > 50.]
-time = time[gof > 50.]
-
-###############################################################################
-# Show result on 3D source space
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
-
-lh_points = src[0]['rr']
-lh_faces = src[0]['use_tris']
-mlab.figure(size=(600, 600), bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
-
-# show brain surface after proper coordinate system transformation
-points = brain_surface['rr']
-faces = brain_surface['tris']
-coord_trans = fwd['mri_head_t']['trans']
-points = np.dot(coord_trans[:3,:3], points.T).T + coord_trans[:3,-1]
-mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
-                     faces, color=(1, 1, 0), opacity=0.3)
-
-# show one cortical surface
-mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
-                     lh_faces, color=(0.7, ) * 3)
-
-# show dipole as small cones
-dipoles = mlab.quiver3d(pos[:,0], pos[:,1], pos[:,2],
-                        ori[:,0], ori[:,1], ori[:,2],
-                        opacity=1., scale_factor=4e-4, scalars=time,
-                        mode='cone')
-mlab.colorbar(dipoles, title='Dipole fit time (ms)')
-
-# proper 3D orientation
-mlab.get_engine().scenes[0].scene.x_plus_view()
diff --git a/examples/inverse/plot_gamma_map_inverse.py b/examples/inverse/plot_gamma_map_inverse.py
index fd64fba..15b0e48 100644
--- a/examples/inverse/plot_gamma_map_inverse.py
+++ b/examples/inverse/plot_gamma_map_inverse.py
@@ -10,8 +10,6 @@ NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 
 import mne
@@ -19,6 +17,8 @@ from mne.datasets import sample
 from mne.inverse_sparse import gamma_map
 from mne.viz import plot_sparse_source_estimates
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
@@ -35,7 +35,7 @@ evoked.crop(tmin=-50e-3, tmax=300e-3)
 forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
                                     force_fixed=False)
 
 # Read noise covariance matrix and regularize it
 cov = mne.read_cov(cov_fname)
 cov = mne.cov.regularize(cov, evoked.info)
 
@@ -50,16 +50,17 @@ stc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True,
 scale_factors = np.max(np.abs(stc.data), axis=1)
 scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))
 
-plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
+plot_sparse_source_estimates(
+    forward['src'], stc, bgcolor=(1, 1, 1),
     modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),
     fig_name="Gamma-MAP")
 
 # Show the evoked response and the residual for gradiometers
 ylim = dict(grad=[-120, 120])
-evoked = mne.pick_types_evoked(evoked, meg='grad', exclude='bads')
+evoked.pick_types(meg='grad', exclude='bads')
 evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
             proj=True)
 
-residual = mne.pick_types_evoked(residual, meg='grad', exclude='bads')
+residual.pick_types(meg='grad', exclude='bads')
 residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
               proj=True)
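
The switch from mne.pick_types_evoked to the pick_types method, seen above,
operates in place. A small self-contained sketch:

    import numpy as np
    import mne

    info = mne.create_info(['MEG 0113', 'EEG 001'], 1000., ['grad', 'eeg'])
    evoked = mne.EvokedArray(np.zeros((2, 10)), info, tmin=0.)

    evoked.pick_types(meg='grad', eeg=False)  # modifies 'evoked' in place
    print(evoked.ch_names)                    # -> ['MEG 0113']
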
diff --git a/examples/inverse/plot_label_activation_from_stc.py b/examples/inverse/plot_label_activation_from_stc.py
index e5c17b0..396d5c7 100644
--- a/examples/inverse/plot_label_activation_from_stc.py
+++ b/examples/inverse/plot_label_activation_from_stc.py
@@ -1,62 +1,62 @@
-"""
-==================================================
-Extracting time course from source_estimate object
-==================================================
-
-Load a SourceEstimate object from stc files and
-extract the time course of activation in
-individual labels, as well as in a complex label
-formed through merging two labels.
-
-"""
-# Author: Christian Brodbeck <christianbrodbeck at nyu.edu>
-#
-# License: BSD (3-clause)
-
+"""
+==================================================
+Extracting time course from source_estimate object
+==================================================
+
+Load a SourceEstimate object from stc files and
+extract the time course of activation in
+individual labels, as well as in a complex label
+formed through merging two labels.
+
+"""
+# Author: Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: BSD (3-clause)
+
+import os
+
+import mne
+from mne.datasets import sample
+import matplotlib.pyplot as plt
+
 print(__doc__)
-
-import os
-
-import mne
-from mne.datasets import sample
-import matplotlib.pyplot as plt
-
-data_path = sample.data_path()
-os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
-meg_path = data_path + '/MEG/sample'
-
-# load the stc
-stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
-
-# load the labels
-aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
-aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
-
-# extract the time course for different labels from the stc
-stc_lh = stc.in_label(aud_lh)
-stc_rh = stc.in_label(aud_rh)
-stc_bh = stc.in_label(aud_lh + aud_rh)
-
-# calculate center of mass and transform to mni coordinates
-vtx, _, t_lh = stc_lh.center_of_mass('sample')
-mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
-vtx, _, t_rh = stc_rh.center_of_mass('sample')
-mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
-
-# plot the activation
-plt.figure()
-plt.axes([.1, .275, .85, .625])
-hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')
-hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')
-hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')
-plt.xlabel('Time (s)')
-plt.ylabel('Source amplitude (dSPM)')
-plt.xlim(stc.times[0], stc.times[-1])
-
-# add a legend including center-of-mass mni coordinates to the plot
-labels = ['LH: center of mass = %s' % mni_lh.round(2),
-          'RH: center of mass = %s' % mni_rh.round(2),
-          'Combined LH & RH']
-plt.figlegend([hl, hr, hb], labels, 'lower center')
-plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
-plt.show()
+
+data_path = sample.data_path()
+os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
+meg_path = data_path + '/MEG/sample'
+
+# load the stc
+stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
+
+# load the labels
+aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
+aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
+
+# extract the time course for different labels from the stc
+stc_lh = stc.in_label(aud_lh)
+stc_rh = stc.in_label(aud_rh)
+stc_bh = stc.in_label(aud_lh + aud_rh)
+
+# calculate center of mass and transform to mni coordinates
+vtx, _, t_lh = stc_lh.center_of_mass('sample')
+mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
+vtx, _, t_rh = stc_rh.center_of_mass('sample')
+mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
+
+# plot the activation
+plt.figure()
+plt.axes([.1, .275, .85, .625])
+hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0]
+hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0]
+hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0]
+plt.xlabel('Time (s)')
+plt.ylabel('Source amplitude (dSPM)')
+plt.xlim(stc.times[0], stc.times[-1])
+
+# add a legend including center-of-mass mni coordinates to the plot
+labels = ['LH: center of mass = %s' % mni_lh.round(2),
+          'RH: center of mass = %s' % mni_rh.round(2),
+          'Combined LH & RH']
+plt.figlegend([hl, hr, hb], labels, 'lower center')
+plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
+plt.show()
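
A side note on the figlegend handles above: plt.plot returns a *list* of
Line2D artists, hence the [0] indexing added in this hunk. A minimal
demonstration:

    import numpy as np
    import matplotlib.pyplot as plt

    t = np.linspace(0., 1., 100)
    handles = plt.plot(t, np.sin(2 * np.pi * t))  # list of Line2D objects
    print(type(handles), type(handles[0]))
    plt.figlegend([handles[0]], ['sine'], 'lower center')
    plt.show()
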
diff --git a/examples/inverse/plot_label_from_stc.py b/examples/inverse/plot_label_from_stc.py
index b03274c..7ecd1ec 100644
--- a/examples/inverse/plot_label_from_stc.py
+++ b/examples/inverse/plot_label_from_stc.py
@@ -11,8 +11,6 @@ functional label. As expected the time course in the functional
 label yields higher values.
 
 """
-print(__doc__)
-
 # Author: Luke Bloy <luke.bloy at gmail.com>
 #         Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
 # License: BSD (3-clause)
@@ -24,6 +22,8 @@ import mne
 from mne.minimum_norm import read_inverse_operator, apply_inverse
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -47,7 +47,7 @@ src = inverse_operator['src']  # get the source space
 
 # Compute inverse solution
 stc = apply_inverse(evoked, inverse_operator, lambda2, method,
-                    pick_normal=True)
+                    pick_ori='normal')
 
 # Make an STC in the time interval of interest and take the mean
 stc_mean = stc.copy().crop(tmin, tmax).mean()
@@ -95,10 +95,8 @@ plt.legend()
 plt.show()
 
 ###############################################################################
-# Plot brain in 3D with PySurfer if available. Note that the subject name
-# is already known by the SourceEstimate stc object.
-brain = stc_mean.plot(surface='inflated', hemi='lh', subjects_dir=subjects_dir)
-brain.scale_data_colormap(fmin=0, fmid=350, fmax=700, transparent=True)
+# plot brain in 3D with PySurfer if available
+brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
 brain.show_view('lateral')
 
 # show both labels
diff --git a/examples/inverse/plot_label_source_activations.py b/examples/inverse/plot_label_source_activations.py
index c1f317f..4acc5ad 100644
--- a/examples/inverse/plot_label_source_activations.py
+++ b/examples/inverse/plot_label_source_activations.py
@@ -14,12 +14,14 @@ also using a sign flip.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, apply_inverse
 
+print(__doc__)
+
 data_path = sample.data_path()
 label = 'Aud-lh'
 label_fname = data_path + '/MEG/sample/labels/%s.label' % label
@@ -50,7 +52,6 @@ pca = stc.extract_label_time_course(label, src, mode='pca_flip')
 print("Number of vertices : %d" % len(stc_label.data))
 
 # View source activations
-import matplotlib.pyplot as plt
 plt.figure()
 plt.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5)
 h0, = plt.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3)
diff --git a/examples/inverse/plot_lcmv_beamformer.py b/examples/inverse/plot_lcmv_beamformer.py
index c7cea0d..811ad1f 100644
--- a/examples/inverse/plot_lcmv_beamformer.py
+++ b/examples/inverse/plot_lcmv_beamformer.py
@@ -7,13 +7,10 @@ Compute LCMV beamformer solutions on evoked dataset for three different choices
 of source orientation and stores the solutions in stc files for visualisation.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
 import numpy as np
 
@@ -22,11 +19,13 @@ from mne.datasets import sample
 from mne.io import Raw
 from mne.beamformer import lcmv
 
+print(__doc__)
+
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
 fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
-fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
 label_name = 'Aud-lh'
 fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
 
@@ -52,11 +51,10 @@ evoked = epochs.average()
 
 forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
 
+# Read regularized noise covariance and compute regularized data covariance
 noise_cov = mne.read_cov(fname_cov)
-noise_cov = mne.cov.regularize(noise_cov, evoked.info,
-                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
-
-data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
+                                  method='shrunk')
 
 plt.close('all')
 
@@ -70,13 +68,11 @@ for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
     stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
                pick_ori=pick_ori)
 
-    # Save result in stc files
-    stc.save('lcmv-' + name)
-
     # View activation time-series
-    data, times, _ = mne.label_time_courses(fname_label, "lcmv-" + name +
-                                            "-lh.stc")
-    plt.plot(1e3 * times, np.mean(data, axis=0), color, hold=True, label=desc)
+    label = mne.read_label(fname_label)
+    stc_label = stc.in_label(label)
+    plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
+             hold=True, label=desc)
 
 plt.xlabel('Time (ms)')
 plt.ylabel('LCMV value')
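
A sketch of the in_label pattern that replaces label_time_courses above
(assumes the sample dataset is available, as in the example itself):

    import mne
    from mne.datasets import sample

    data_path = sample.data_path()
    stc = mne.read_source_estimate(data_path + '/MEG/sample/sample_audvis-meg')
    label = mne.read_label(data_path + '/MEG/sample/labels/Aud-lh.label')

    stc_label = stc.in_label(label)  # keep only vertices inside the label
    print(stc_label.data.shape)      # (n_label_vertices, n_times)
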
diff --git a/examples/inverse/plot_lcmv_beamformer_volume.py b/examples/inverse/plot_lcmv_beamformer_volume.py
index c2debc1..ee23493 100644
--- a/examples/inverse/plot_lcmv_beamformer_volume.py
+++ b/examples/inverse/plot_lcmv_beamformer_volume.py
@@ -8,26 +8,28 @@ space. It stores the solution in a nifti file for visualisation e.g. with
 Freeview.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 from mne.beamformer import lcmv
 
+from nilearn.plotting import plot_stat_map
+from nilearn.image import index_img
+
+print(__doc__)
 
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
 fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
-fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
 
 ###############################################################################
 # Get epochs
@@ -51,11 +53,10 @@ evoked = epochs.average()
 
 forward = mne.read_forward_solution(fname_fwd)
 
+# Read regularized noise covariance and compute regularized data covariance
 noise_cov = mne.read_cov(fname_cov)
-noise_cov = mne.cov.regularize(noise_cov, evoked.info,
-                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
-
-data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
+                                  method='shrunk')
 
 # Run free orientation (vector) beamformer. Source orientation can be
 # restricted by setting pick_ori to 'max-power' (or 'normal' but only when
@@ -69,19 +70,13 @@ stc.crop(0.0, 0.2)
 
 # Save result in a 4D nifti file
 img = mne.save_stc_as_volume('lcmv_inverse.nii.gz', stc,
-        forward['src'], mri_resolution=False)  # True for full MRI resolution
+                             forward['src'], mri_resolution=False)
 
-# plot result (one slice)
-plt.close('all')
-data = img.get_data()
-coronal_slice = data[:, 10, :, 60]
-plt.figure()
-plt.imshow(np.ma.masked_less(coronal_slice, 1), cmap=plt.cm.Reds,
-           interpolation='nearest')
-plt.colorbar()
-plt.contour(coronal_slice != 0, 1, colors=['black'])
-plt.xticks([])
-plt.yticks([])
+t1_fname = data_path + '/subjects/sample/mri/T1.mgz'
+
+# Plotting with nilearn ######################################################
+plot_stat_map(index_img(img, 61), t1_fname, threshold=0.8,
+              title='LCMV (t=%.1f s.)' % stc.times[61])
 
 # plot source time courses with the maximum peak amplitudes
 plt.figure()
diff --git a/examples/inverse/plot_make_inverse_operator.py b/examples/inverse/plot_make_inverse_operator.py
index 4b4339e..669846d 100644
--- a/examples/inverse/plot_make_inverse_operator.py
+++ b/examples/inverse/plot_make_inverse_operator.py
@@ -8,23 +8,23 @@ inverse solution on MNE evoked dataset and stores the solution
 in stc files for visualisation.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import (make_inverse_operator, apply_inverse,
                               write_inverse_operator)
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname_fwd_meeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 fname_fwd_eeg = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif'
-fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
 fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 snr = 3.0
@@ -35,10 +35,6 @@ evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
 forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True)
 noise_cov = mne.read_cov(fname_cov)
 
-# regularize noise covariance
-noise_cov = mne.cov.regularize(noise_cov, evoked.info,
-                               mag=0.05, grad=0.05, eeg=0.1, proj=True)
-
 # Restrict forward solution as necessary for MEG
 forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
 # Alternatively, you can just load a forward solution that is restricted
diff --git a/examples/inverse/plot_mixed_norm_L21_inverse.py b/examples/inverse/plot_mixed_norm_inverse.py
similarity index 72%
rename from examples/inverse/plot_mixed_norm_L21_inverse.py
rename to examples/inverse/plot_mixed_norm_inverse.py
index a467ae8..ebafd51 100644
--- a/examples/inverse/plot_mixed_norm_L21_inverse.py
+++ b/examples/inverse/plot_mixed_norm_inverse.py
@@ -1,30 +1,41 @@
 """
 ================================================================
-Compute sparse inverse solution based on L1/L2 mixed norm (MxNE)
+Compute sparse inverse solution with mixed norm: MxNE and irMxNE
 ================================================================
 
+Runs the (ir)MxNE (L1/L2 or L0.5/L2 mixed-norm) inverse solver.
+The L0.5/L2 case is handled by irMxNE, which allows for sparser
+source estimates with less amplitude bias, owing to the non-convexity
+of the L0.5/L2 mixed-norm penalty.
+
 See
 Gramfort A., Kowalski M. and Hamalainen, M,
 Mixed-norm estimates for the M/EEG inverse problem using accelerated
 gradient methods, Physics in Medicine and Biology, 2012
 http://dx.doi.org/10.1088/0031-9155/57/7/1937
+
+Strohmeier D., Haueisen J., and Gramfort A.:
+Improved MEG/EEG source localization with reweighted mixed-norms,
+4th International Workshop on Pattern Recognition in Neuroimaging,
+Tuebingen, 2014
+DOI: 10.1109/PRNI.2014.6858545
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
 from mne.inverse_sparse import mixed_norm
 from mne.minimum_norm import make_inverse_operator, apply_inverse
 from mne.viz import plot_sparse_source_estimates
 
+print(__doc__)
+
 data_path = sample.data_path()
 fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
-cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
 subjects_dir = data_path + '/subjects'
 
 # Read noise covariance matrix
@@ -36,15 +47,15 @@ evoked.crop(tmin=0, tmax=0.3)
 # Handling forward solution
 forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
 
-cov = mne.cov.regularize(cov, evoked.info)
-
 ylim = dict(eeg=[-10, 10], grad=[-400, 400], mag=[-600, 600])
 evoked.plot(ylim=ylim, proj=True)
 
 ###############################################################################
 # Run solver
-alpha = 70  # regularization parameter between 0 and 100 (100 is high)
+alpha = 50  # regularization parameter between 0 and 100 (100 is high)
 loose, depth = 0.2, 0.9  # loose orientation & depth weighting
+n_mxne_iter = 10  # if > 1, use the L0.5/L2 reweighted mixed-norm solver
+# With n_mxne_iter > 1, the dSPM weighting can be omitted.
 
 # Compute dSPM solution to be used as weights in MxNE
 inverse_operator = make_inverse_operator(evoked.info, forward, cov,
@@ -52,18 +63,19 @@ inverse_operator = make_inverse_operator(evoked.info, forward, cov,
 stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
                          method='dSPM')
 
-# Compute MxNE inverse solution
+# Compute (ir)MxNE inverse solution
 stc, residual = mixed_norm(evoked, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=3000, tol=1e-4,
                            active_set_size=10, debias=True, weights=stc_dspm,
-                           weights_min=8., return_residual=True)
-
+                           weights_min=8., n_mxne_iter=n_mxne_iter,
+                           return_residual=True)
 residual.plot(ylim=ylim, proj=True)
 
 ###############################################################################
 # View in 2D and 3D ("glass" brain like 3D plot)
 plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
-                             opacity=0.1, fig_name="MxNE (cond %s)" % condition)
+                             fig_name="MxNE (cond %s)" % condition,
+                             opacity=0.1)
 
 # and on the fsaverage brain after morphing
 stc_fsaverage = stc.morph(subject_from='sample', subject_to='fsaverage',
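
To see why the reweighted variant reduces amplitude bias, consider a toy,
orthogonal-design version of the reweighting loop (pure numpy, not the MNE
solver):

    import numpy as np

    # With an orthogonal design each weighted-L1 step is closed-form
    # soft-thresholding, so the loop is easy to follow.
    b = np.array([3.0, 0.4, -2.0, 0.1])  # noisy "measurements"
    alpha = 0.5
    x = b.copy()
    for _ in range(10):
        w = 1. / (2. * np.sqrt(np.abs(x)) + 1e-3)  # weights from last x
        x = np.sign(b) * np.maximum(np.abs(b) - alpha * w, 0.)
    print(x)  # small entries go exactly to zero; large ones barely shrink,
              # whereas plain L1 would shrink every entry by alpha
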
diff --git a/examples/inverse/plot_mne_crosstalk_function.py b/examples/inverse/plot_mne_crosstalk_function.py
index 551a404..3a7f38a 100644
--- a/examples/inverse/plot_mne_crosstalk_function.py
+++ b/examples/inverse/plot_mne_crosstalk_function.py
@@ -10,17 +10,18 @@ one label) to sources across the cortical surface. Sensitivity
 to sources outside the label is undesirable, and is referred to as
 "leakage" or "cross-talk".
 """
-
 # Author: Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+from mayavi import mlab
 
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import cross_talk_function, read_inverse_operator
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects/'
 fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
@@ -31,9 +32,8 @@ fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
                data_path + '/MEG/sample/labels/Vis-rh.label',
                data_path + '/MEG/sample/labels/Vis-lh.label']
 
-# In order to get gain matrix with fixed source orientation,
-# read forward solution with fixed orientations
-forward = mne.read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
+# read forward solution
+forward = mne.read_forward_solution(fname_fwd)
 
 # read label(s)
 labels = [mne.read_label(ss) for ss in fname_label]
@@ -47,35 +47,23 @@ mode = 'svd'
 n_svd_comp = 1
 
 method = 'MNE'  # can be 'MNE', 'dSPM', or 'sLORETA'
-stc_ctf_mne = cross_talk_function(inverse_operator, forward, labels,
-                                  method=method, lambda2=lambda2,
-                                  signed=False, mode=mode,
-                                  n_svd_comp=n_svd_comp)
+stc_ctf_mne = cross_talk_function(
+    inverse_operator, forward, labels, method=method, lambda2=lambda2,
+    signed=False, mode=mode, n_svd_comp=n_svd_comp)
 
 method = 'dSPM'
-stc_ctf_dspm = cross_talk_function(inverse_operator, forward, labels,
-                                   method=method, lambda2=lambda2,
-                                   signed=False, mode=mode,
-                                   n_svd_comp=n_svd_comp)
+stc_ctf_dspm = cross_talk_function(
+    inverse_operator, forward, labels, method=method, lambda2=lambda2,
+    signed=False, mode=mode, n_svd_comp=n_svd_comp)
 
-from mayavi import mlab
-fmin = 0.
 time_label = "MNE %d"
-fmax = stc_ctf_mne.data[:, 0].max()
-fmid = fmax / 2.
-brain_mne = stc_ctf_mne.plot(surface='inflated', hemi='rh',
-                             subjects_dir=subjects_dir,
-                             time_label=time_label, fmin=fmin,
-                             fmid=fmid, fmax=fmax,
+brain_mne = stc_ctf_mne.plot(hemi='rh', subjects_dir=subjects_dir,
+                             time_label=time_label,
                              figure=mlab.figure(size=(500, 500)))
 
 time_label = "dSPM %d"
-fmax = stc_ctf_dspm.data[:, 0].max()
-fmid = fmax / 2.
-brain_dspm = stc_ctf_dspm.plot(surface='inflated', hemi='rh',
-                               subjects_dir=subjects_dir,
-                               time_label=time_label, fmin=fmin,
-                               fmid=fmid, fmax=fmax,
+brain_dspm = stc_ctf_dspm.plot(hemi='rh', subjects_dir=subjects_dir,
+                               time_label=time_label,
                                figure=mlab.figure(size=(500, 500)))
 
 # Cross-talk functions for MNE and dSPM (and sLORETA) have the same shapes
diff --git a/examples/inverse/plot_mne_point_spread_function.py b/examples/inverse/plot_mne_point_spread_function.py
index 78cce34..a41da35 100644
--- a/examples/inverse/plot_mne_point_spread_function.py
+++ b/examples/inverse/plot_mne_point_spread_function.py
@@ -8,18 +8,19 @@ for linear inverse operators (MNE, dSPM, sLORETA).
 PSFs describe the spread of activation from one label
 across the cortical surface.
 """
-
 # Authors: Olaf Hauk <olaf.hauk at mrc-cbu.cam.ac.uk>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+from mayavi import mlab
 
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, point_spread_function
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects/'
 fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
@@ -32,7 +33,7 @@ fname_label = [data_path + '/MEG/sample/labels/Aud-rh.label',
                data_path + '/MEG/sample/labels/Vis-lh.label']
 
 
-# read forward solution (sources in surface-based coordinates)
+# read forward solution
 forward = mne.read_forward_solution(fname_fwd, force_fixed=False,
                                     surf_ori=True)
 
@@ -50,45 +51,27 @@ method = 'MNE'  # can be 'MNE' or 'sLORETA'
 mode = 'svd'
 n_svd_comp = 1
 
-stc_psf_eegmeg, _ = point_spread_function(inverse_operator_eegmeg,
-                                          forward, method=method,
-                                          labels=labels,
-                                          lambda2=lambda2,
-                                          pick_ori='normal',
-                                          mode=mode,
-                                          n_svd_comp=n_svd_comp)
-
-stc_psf_meg, _ = point_spread_function(inverse_operator_meg,
-                                       forward, method=method,
-                                       labels=labels,
-                                       lambda2=lambda2,
-                                       pick_ori='normal',
-                                       mode=mode,
-                                       n_svd_comp=n_svd_comp)
+stc_psf_eegmeg, _ = point_spread_function(
+    inverse_operator_eegmeg, forward, method=method, labels=labels,
+    lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
+
+stc_psf_meg, _ = point_spread_function(
+    inverse_operator_meg, forward, method=method, labels=labels,
+    lambda2=lambda2, pick_ori='normal', mode=mode, n_svd_comp=n_svd_comp)
 
 # save for viewing in mne_analyze in order of labels in 'labels'
 # last sample is average across PSFs
 # stc_psf_eegmeg.save('psf_eegmeg')
 # stc_psf_meg.save('psf_meg')
 
-from mayavi import mlab
-fmin = 0.
 time_label = "EEGMEG %d"
-fmax = stc_psf_eegmeg.data[:, 0].max()
-fmid = fmax / 2.
-brain_eegmeg = stc_psf_eegmeg.plot(surface='inflated', hemi='rh',
-                                   subjects_dir=subjects_dir,
-                                   time_label=time_label, fmin=fmin,
-                                   fmid=fmid, fmax=fmax,
+brain_eegmeg = stc_psf_eegmeg.plot(hemi='rh', subjects_dir=subjects_dir,
+                                   time_label=time_label,
                                    figure=mlab.figure(size=(500, 500)))
 
 time_label = "MEG %d"
-fmax = stc_psf_meg.data[:, 0].max()
-fmid = fmax / 2.
-brain_meg = stc_psf_meg.plot(surface='inflated', hemi='rh',
-                             subjects_dir=subjects_dir,
-                             time_label=time_label, fmin=fmin,
-                             fmid=fmid, fmax=fmax,
+brain_meg = stc_psf_meg.plot(hemi='rh', subjects_dir=subjects_dir,
+                             time_label=time_label,
                              figure=mlab.figure(size=(500, 500)))
 
 # The PSF is centred around the right auditory cortex label,
diff --git a/examples/inverse/plot_morph_data.py b/examples/inverse/plot_morph_data.py
index 5e1c496..7d9d162 100644
--- a/examples/inverse/plot_morph_data.py
+++ b/examples/inverse/plot_morph_data.py
@@ -13,16 +13,19 @@ is a source estimate defined on the anatomy of 'fsaverage'
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
-import numpy as np
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 
 subject_from = 'sample'
 subject_to = 'fsaverage'
+subjects_dir = data_path + '/subjects'
 
 fname = data_path + '/MEG/sample/sample_audvis-meg'
 src_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
@@ -35,19 +38,19 @@ stc_from = mne.read_source_estimate(fname)
 # But fsaverage's source space was set up so we can just do this:
 vertices_to = [np.arange(10242), np.arange(10242)]
 stc_to = mne.morph_data(subject_from, subject_to, stc_from, n_jobs=1,
-                        grade=vertices_to)
+                        grade=vertices_to, subjects_dir=subjects_dir)
 stc_to.save('%s_audvis-meg' % subject_to)
 
 # Morph using another method -- useful if you're going to do a lot of the
 # same inter-subject morphing operations; you could save and load morph_mat
-morph_mat = mne.compute_morph_matrix(subject_from, subject_to, stc_from.vertno,
-                                     vertices_to)
+morph_mat = mne.compute_morph_matrix(subject_from, subject_to,
+                                     stc_from.vertices, vertices_to,
+                                     subjects_dir=subjects_dir)
 stc_to_2 = mne.morph_data_precomputed(subject_from, subject_to,
                                       stc_from, vertices_to, morph_mat)
 stc_to_2.save('%s_audvis-meg_2' % subject_to)
 
 # View source activations
-import matplotlib.pyplot as plt
 plt.plot(stc_from.times, stc_from.data.mean(axis=0), 'r', label='from')
 plt.plot(stc_to.times, stc_to.data.mean(axis=0), 'b', label='to')
 plt.plot(stc_to_2.times, stc_to.data.mean(axis=0), 'g', label='to_2')
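
Since morph_mat is a scipy sparse matrix, "save and load morph_mat" can be
done with, e.g., Matrix Market files (sketch with a stand-in matrix):

    import numpy as np
    from scipy import io as sio
    from scipy import sparse

    morph_mat = sparse.eye(5, format='csr')  # stand-in for a real morph_mat
    sio.mmwrite('morph_mat.mtx', morph_mat)  # save once...
    morph_mat2 = sparse.csr_matrix(sio.mmread('morph_mat.mtx'))  # ...reload
    print(np.allclose(morph_mat.toarray(), morph_mat2.toarray()))  # True
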
diff --git a/examples/inverse/plot_rap_music.py b/examples/inverse/plot_rap_music.py
new file mode 100644
index 0000000..8ef72d7
--- /dev/null
+++ b/examples/inverse/plot_rap_music.py
@@ -0,0 +1,57 @@
+"""
+================================
+Compute RAP-MUSIC on evoked data
+================================
+
+Compute a Recursively Applied and Projected MUltiple Signal Classification
+(RAP-MUSIC) on an evoked dataset.
+
+The reference for RAP-MUSIC is:
+J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively
+applied and projected (RAP) MUSIC. Trans. Sig. Proc. 47, 2
+(February 1999), 332-340.
+DOI=10.1109/78.740118 http://dx.doi.org/10.1109/78.740118
+"""
+
+# Author: Yousra Bekhti <yousra.bekhti at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+
+from mne.datasets import sample
+from mne.beamformer import rap_music
+from mne.viz import plot_dipole_locations, plot_dipole_amplitudes
+
+print(__doc__)
+
+data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
+fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
+evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+
+# Read the evoked response and crop it
+condition = 'Right Auditory'
+evoked = mne.read_evokeds(evoked_fname, condition=condition,
+                          baseline=(None, 0))
+evoked.crop(tmin=0.05, tmax=0.15)  # select N100
+
+evoked.pick_types(meg=True, eeg=False)
+
+# Read the forward solution
+forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
+                                    force_fixed=False)
+
+# Read noise covariance matrix
+noise_cov = mne.read_cov(cov_fname)
+
+dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
+                              return_residual=True, verbose=True)
+trans = forward['mri_head_t']
+plot_dipole_locations(dipoles, trans, 'sample', subjects_dir=subjects_dir)
+plot_dipole_amplitudes(dipoles)
+
+# Plot the evoked data and the residual.
+evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]))
+residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]))
diff --git a/examples/inverse/plot_read_inverse.py b/examples/inverse/plot_read_inverse.py
index 8d3eebb..97177d3 100644
--- a/examples/inverse/plot_read_inverse.py
+++ b/examples/inverse/plot_read_inverse.py
@@ -1,17 +1,19 @@
 """
-=======================================================
-Reading an inverse operator and view source space in 3D
-=======================================================
+===========================
+Reading an inverse operator
+===========================
+
+The inverse operator's source space is shown in 3D.
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path
 fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -29,13 +31,13 @@ lh_points = inv['src'][0]['rr']
 lh_faces = inv['src'][0]['use_tris']
 rh_points = inv['src'][1]['rr']
 rh_faces = inv['src'][1]['use_tris']
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
+from mayavi import mlab  # noqa
 
 mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
-mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
-                     lh_faces)
-mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
-                     rh_faces)
+mesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+                            lh_faces, colormap='RdBu')
+mesh.module_manager.scalar_lut_manager.reverse_lut = True
+
+mesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+                            rh_faces, colormap='RdBu')
+mesh.module_manager.scalar_lut_manager.reverse_lut = True
diff --git a/examples/inverse/plot_read_source_space.py b/examples/inverse/plot_read_source_space.py
index e019f47..2bf7776 100644
--- a/examples/inverse/plot_read_source_space.py
+++ b/examples/inverse/plot_read_source_space.py
@@ -2,35 +2,39 @@
 ==============================================
 Reading a source space from a forward operator
 ==============================================
+
+This example visualizes a source space mesh used by a forward operator.
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import os.path as op
 
 import mne
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = op.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif')
 
-add_geom = True  # include high resolution source space
-src = mne.read_source_spaces(fname, add_geom=add_geom)
+patch_stats = True  # include high resolution source space
+src = mne.read_source_spaces(fname, patch_stats=patch_stats)
 
 # 3D source space (high sampling)
 lh_points = src[0]['rr']
 lh_faces = src[0]['tris']
 rh_points = src[1]['rr']
 rh_faces = src[1]['tris']
-try:
-    from enthought.mayavi import mlab
-except:
-    from mayavi import mlab
-mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
-mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
-                     lh_faces)
-mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
-                     rh_faces)
+
+from mayavi import mlab  # noqa
+mlab.figure(size=(600, 600), bgcolor=(0, 0, 0),)
+mesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],
+                            lh_faces, colormap='RdBu')
+mesh.module_manager.scalar_lut_manager.reverse_lut = True
+
+mesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],
+                            rh_faces, colormap='RdBu')
+mesh.module_manager.scalar_lut_manager.reverse_lut = True
diff --git a/examples/inverse/plot_read_stc.py b/examples/inverse/plot_read_stc.py
index dadd9b1..f41508f 100644
--- a/examples/inverse/plot_read_stc.py
+++ b/examples/inverse/plot_read_stc.py
@@ -10,11 +10,13 @@ reconstructions
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis-meg'
 
@@ -25,7 +27,6 @@ print("stc data size: %s (nb of vertices) x %s (nb of samples)"
       % (n_vertices, n_samples))
 
 # View source activations
-import matplotlib.pyplot as plt
 plt.plot(stc.times, stc.data[::100, :].T)
 plt.xlabel('time (ms)')
 plt.ylabel('Source amplitude')
diff --git a/examples/inverse/plot_snr_estimate.py b/examples/inverse/plot_snr_estimate.py
new file mode 100644
index 0000000..acc7223
--- /dev/null
+++ b/examples/inverse/plot_snr_estimate.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""
+============================
+Plot an estimate of data SNR
+============================
+
+This example estimates the SNR of evoked data as a function of time.
+"""
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+from os import path as op
+
+from mne.datasets.sample import data_path
+from mne.minimum_norm import read_inverse_operator
+from mne import read_evokeds
+from mne.viz import plot_snr_estimate
+
+print(__doc__)
+
+data_dir = op.join(data_path(), 'MEG', 'sample')
+fname_inv = op.join(data_dir, 'sample_audvis-meg-oct-6-meg-inv.fif')
+fname_evoked = op.join(data_dir, 'sample_audvis-ave.fif')
+
+inv = read_inverse_operator(fname_inv)
+evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
+
+plot_snr_estimate(evoked, inv)
diff --git a/examples/inverse/plot_tf_dics.py b/examples/inverse/plot_tf_dics.py
index 2446e65..b41ce36 100644
--- a/examples/inverse/plot_tf_dics.py
+++ b/examples/inverse/plot_tf_dics.py
@@ -10,13 +10,10 @@ The original reference is:
 Dalal et al. Five-dimensional neuroimaging: Localization of the time-frequency
 dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
 """
-
 # Author: Roman Goj <roman.goj at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.io import Raw
 from mne.event import make_fixed_length_events
@@ -25,6 +22,8 @@ from mne.time_frequency import compute_epochs_csd
 from mne.beamformer import tf_dics
 from mne.viz import plot_source_spectrogram
 
+print(__doc__)
+
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 noise_fname = data_path + '/MEG/sample/ernoise_raw.fif'
diff --git a/examples/inverse/plot_tf_lcmv.py b/examples/inverse/plot_tf_lcmv.py
index 8fcd076..14e10b8 100644
--- a/examples/inverse/plot_tf_lcmv.py
+++ b/examples/inverse/plot_tf_lcmv.py
@@ -10,13 +10,10 @@ The original reference is:
 Dalal et al. Five-dimensional neuroimaging: Localization of the time-frequency
 dynamics of cortical activity. NeuroImage (2008) vol. 40 (4) pp. 1686-1700
 """
-
 # Author: Roman Goj <roman.goj at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import compute_covariance
 from mne.io import Raw
@@ -25,6 +22,7 @@ from mne.event import make_fixed_length_events
 from mne.beamformer import tf_lcmv
 from mne.viz import plot_source_spectrogram
 
+print(__doc__)
 
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
@@ -58,8 +56,8 @@ tmin, tmax = -0.55, 0.75  # s
 tmin_plot, tmax_plot = -0.3, 0.5  # s
 
 # Read epochs. Note that preload is set to False to enable tf_lcmv to read the
-# underlying raw object from epochs.raw, which would be set to None during
-# preloading. Filtering is then performed on raw data in tf_lcmv and the epochs
+# underlying raw object.
+# Filtering is then performed on raw data in tf_lcmv and the epochs
 # parameters passed here are used to create epochs from filtered data. However,
 # reading epochs without preloading means that bad epoch rejection is delayed
 # until later. To perform bad epoch rejection based on the reject parameter
@@ -102,9 +100,7 @@ win_lengths = [0.3, 0.2, 0.15, 0.1]  # s
 # Setting the time step
 tstep = 0.05
 
-# Setting the noise covariance and whitened data covariance regularization
-# parameters
-noise_reg = 0.03
+# Setting the whitened data covariance regularization parameter
 data_reg = 0.001
 
 # Subtract evoked response prior to computation?
@@ -123,9 +119,7 @@ for (l_freq, h_freq) in freq_bins:
                              tmin=tmin_plot, tmax=tmax_plot, baseline=None,
                              picks=epochs.picks, proj=True)
 
-    noise_cov = compute_covariance(epochs_band)
-    noise_cov = mne.cov.regularize(noise_cov, epochs_band.info, mag=noise_reg,
-                                   grad=noise_reg, eeg=noise_reg, proj=True)
+    noise_cov = compute_covariance(epochs_band, method='shrunk')
     noise_covs.append(noise_cov)
     del raw_band  # to save memory
 
diff --git a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
index d15294d..960891c 100644
--- a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
+++ b/examples/inverse/plot_time_frequency_mixed_norm_inverse.py
@@ -36,18 +36,19 @@ http://dx.doi.org/10.1007/978-3-642-22092-0_49
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import make_inverse_operator, apply_inverse
 from mne.inverse_sparse import tf_mixed_norm
 from mne.viz import plot_sparse_source_estimates
 
+print(__doc__)
+
 data_path = sample.data_path()
+subjects_dir = data_path + '/subjects'
 fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
 ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
-cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
 
 # Read noise covariance matrix
 cov = mne.read_cov(cov_fname)
@@ -64,8 +65,6 @@ evoked.crop(tmin=-0.1, tmax=0.4)
 forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
                                     surf_ori=True)
 
-cov = mne.cov.regularize(cov, evoked.info)
-
 ###############################################################################
 # Run solver
 
@@ -95,14 +94,15 @@ stc.crop(tmin=-0.05, tmax=0.3)
 evoked.crop(tmin=-0.05, tmax=0.3)
 residual.crop(tmin=-0.05, tmax=0.3)
 
-ylim = dict(eeg=[-10, 10], grad=[-200, 250], mag=[-600, 600])
-picks = mne.pick_types(evoked.info, meg='grad', exclude='bads')
-evoked.plot(picks=picks, ylim=ylim, proj=True,
-            titles=dict(grad='Evoked Response (grad)'))
+# Show the evoked response and the residual for gradiometers
+ylim = dict(grad=[-120, 120])
+evoked.pick_types(meg='grad', exclude='bads')
+evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
+            proj=True)
 
-picks = mne.pick_types(residual.info, meg='grad', exclude='bads')
-residual.plot(picks=picks, ylim=ylim, proj=True,
-              titles=dict(grad='Residual (grad)'))
+residual.pick_types(meg='grad', exclude='bads')
+residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
+              proj=True)
 
 ###############################################################################
 # View in 2D and 3D ("glass" brain like 3D plot)
@@ -111,9 +111,9 @@ plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
                              % condition, modes=['sphere'], scale_factors=[1.])
 
 time_label = 'TF-MxNE time=%0.2f ms'
-brain = stc.plot('sample', 'inflated', 'rh', fmin=10e-9, fmid=15e-9,
-                 fmax=20e-9, time_label=time_label, smoothing_steps=5,
-                 subjects_dir=data_path + '/subjects')
+clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
+brain = stc.plot('sample', 'inflated', 'rh', clim=clim, time_label=time_label,
+                 smoothing_steps=5, subjects_dir=subjects_dir)
 brain.show_view('medial')
 brain.set_data_time_index(120)
 brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
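
Note on the hunk above: stc.plot now takes its colormap limits through a
single `clim` dict rather than separate fmin/fmid/fmax arguments. A minimal
sketch of the two accepted forms; the percentile variant is an illustrative
alternative, not part of this patch:

    clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])  # absolute values
    clim = dict(kind='percent', lims=[97.5, 99.0, 99.9])   # data percentiles
    brain = stc.plot('sample', 'inflated', 'rh', clim=clim)
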
diff --git a/examples/io/README.txt b/examples/io/README.txt
new file mode 100644
index 0000000..45f88d1
--- /dev/null
+++ b/examples/io/README.txt
@@ -0,0 +1,5 @@
+
+Input/Output
+-----------
+
+Reading and writing files.
diff --git a/examples/io/plot_objects_from_arrays.py b/examples/io/plot_objects_from_arrays.py
new file mode 100644
index 0000000..f8a84b8
--- /dev/null
+++ b/examples/io/plot_objects_from_arrays.py
@@ -0,0 +1,121 @@
+"""
+=====================================
+Creating MNE objects from data arrays
+=====================================
+
+This simple example demonstrates how to create MNE objects from
+NumPy arrays. In the last example, a NEO file is used as the
+source for the data.
+"""
+# Author: Jaakko Leppakangas <jaeilepp at student.jyu.fi>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import neo
+
+import mne
+
+print(__doc__)
+
+###############################################################################
+# Create arbitrary data
+
+sfreq = 1000  # Sampling frequency
+times = np.arange(0, 10, 0.001)  # Use 10000 samples (10s)
+
+sin = np.sin(times * 10)  # Multiplied by 10 for a higher frequency
+cos = np.cos(times * 10)
+sinX2 = sin * 2
+cosX2 = cos * 2
+
+# NumPy array of shape 4 x 10000.
+data = np.array([sin, cos, sinX2, cosX2])
+
+# Definition of channel types and names.
+ch_types = ['mag', 'mag', 'grad', 'grad']
+ch_names = ['sin', 'cos', 'sinX2', 'cosX2']
+
+###############################################################################
+# Creation of info dictionary.
+
+# It is also possible to use info from another raw object.
+info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+
+raw = mne.io.RawArray(data, info)
+
+# Scaling of the figure.
+# For actual EEG/MEG data different scaling factors should be used.
+scalings = {'mag': 2, 'grad': 2}
+
+raw.plot(n_channels=4, scalings=scalings, title='Data from arrays',
+         show=True, block=True)
+
+
+###############################################################################
+# EpochsArray
+
+event_id = 1
+events = np.array([[200, 0, event_id],
+                   [1200, 0, event_id],
+                   [2000, 0, event_id]])  # List of three arbitrary events
+
+# Here, a dataset of 700 ms epochs from 2 channels is created from the
+# sin and cos data.
+# Any array of shape (n_epochs, n_channels, n_times) can be used.
+epochs_data = [[sin[:700], cos[:700]],
+               [sin[1000:1700], cos[1000:1700]],
+               [sin[1800:2500], cos[1800:2500]]]
+
+ch_names = ['sin', 'cos']
+ch_types = ['mag', 'mag']
+info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+
+epochs = mne.EpochsArray(epochs_data, info=info, events=events,
+                         event_id={'arbitrary': 1})
+
+picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
+
+epochs.plot(picks=picks, show=True, block=True)
+
+
+###############################################################################
+# EvokedArray
+
+nave = len(epochs_data)  # Number of averaged epochs
+evoked_data = np.mean(epochs_data, axis=0)
+
+evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
+                          comment='Arbitrary', nave=nave)
+evokeds.plot(picks=picks, show=True, units={'mag': '-'},
+             titles={'mag': 'sin and cos averaged'})
+
+
+###############################################################################
+# Extracting data from NEO file
+
+# The example here uses the ExampleIO object for creating fake data.
+# For actual data and different file formats, consult the NEO documentation.
+reader = neo.io.ExampleIO('fakedata.nof')
+bl = reader.read(cascade=True, lazy=False)[0]
+
+# Get data from first (and only) segment
+seg = bl.segments[0]
+title = seg.file_origin
+
+ch_names = list()
+data = list()
+for asig in seg.analogsignals:
+    # Since the data does not contain channel names, channel indices are used.
+    ch_names.append(str(asig.channel_index))
+    asig = asig.rescale('V').magnitude
+    data.append(asig)
+
+sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
+
+# By default, the channel types are assumed to be 'misc'.
+info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
+
+raw = mne.io.RawArray(data, info)
+raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
+         show=True, block=True, clipping='clamp')
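
For reference, the `events` arrays in the new example follow MNE's
three-column convention. A minimal sketch restating the values used above:

    import numpy as np
    # columns: [sample index, previous trigger value, event id]
    events = np.array([[200, 0, 1],
                       [1200, 0, 1],
                       [2000, 0, 1]])
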
diff --git a/examples/plot_read_and_write_raw_data.py b/examples/io/plot_read_and_write_raw_data.py
similarity index 99%
rename from examples/plot_read_and_write_raw_data.py
rename to examples/io/plot_read_and_write_raw_data.py
index b75b72b..84416d6 100644
--- a/examples/plot_read_and_write_raw_data.py
+++ b/examples/io/plot_read_and_write_raw_data.py
@@ -11,10 +11,11 @@ raw file.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
diff --git a/examples/plot_read_epochs.py b/examples/io/plot_read_epochs.py
similarity index 99%
rename from examples/plot_read_epochs.py
rename to examples/io/plot_read_epochs.py
index b926a76..b6dfd32 100644
--- a/examples/plot_read_epochs.py
+++ b/examples/io/plot_read_epochs.py
@@ -13,11 +13,12 @@ for both MEG and EEG data by averaging all the epochs.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -39,6 +40,7 @@ picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                     picks=picks, baseline=(None, 0), preload=True,
                     reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
+
 evoked = epochs.average()  # average epochs to get the evoked response
 
 ###############################################################################
diff --git a/examples/plot_read_evoked.py b/examples/io/plot_read_evoked.py
similarity index 100%
rename from examples/plot_read_evoked.py
rename to examples/io/plot_read_evoked.py
index aea48a7..4750d94 100644
--- a/examples/plot_read_evoked.py
+++ b/examples/io/plot_read_evoked.py
@@ -8,11 +8,11 @@ Reading and writing an evoked file
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 from mne import read_evokeds
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
diff --git a/examples/plot_read_noise_covariance_matrix.py b/examples/io/plot_read_noise_covariance_matrix.py
similarity index 57%
rename from examples/plot_read_noise_covariance_matrix.py
rename to examples/io/plot_read_noise_covariance_matrix.py
index 284e4f8..d3c0c27 100644
--- a/examples/plot_read_noise_covariance_matrix.py
+++ b/examples/io/plot_read_noise_covariance_matrix.py
@@ -2,28 +2,28 @@
 =========================================
 Reading/Writing a noise covariance matrix
 =========================================
+
+Plot a noise covariance matrix.
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
+from os import path as op
 import mne
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
-fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
 
-cov = mne.Covariance(fname)
+cov = mne.read_cov(fname_cov)
 print(cov)
+evoked = mne.read_evokeds(fname_evo)[0]
 
 ###############################################################################
 # Show covariance
 
-# Note: if you have the measurement info you can use mne.viz.plot_cov
-
-import matplotlib.pyplot as plt
-plt.matshow(cov.data)
-plt.title('Noise covariance matrix (%d channels)' % cov.data.shape[0])
-plt.show()
+cov.plot(evoked.info, exclude='bads', show_svd=False)
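
Note on the hunk above: the removed `mne.Covariance(fname)` constructor is
replaced by the reader function, and plotting now goes through the
Covariance method, which needs measurement info. A minimal sketch using the
filenames defined in the example:

    cov = mne.read_cov(fname_cov)            # replaces mne.Covariance(...)
    evoked = mne.read_evokeds(fname_evo)[0]  # supplies the required info
    cov.plot(evoked.info, exclude='bads', show_svd=False)
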
diff --git a/examples/read_events.py b/examples/io/read_events.py
similarity index 100%
rename from examples/read_events.py
rename to examples/io/read_events.py
index 8ef1778..ec6da51 100644
--- a/examples/read_events.py
+++ b/examples/io/read_events.py
@@ -9,11 +9,11 @@ Read events from a file.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
 
diff --git a/examples/inverse/plot_compute_mne_inverse.py b/examples/plot_compute_mne_inverse.py
similarity index 86%
rename from examples/inverse/plot_compute_mne_inverse.py
rename to examples/plot_compute_mne_inverse.py
index 938cf88..075b560 100644
--- a/examples/inverse/plot_compute_mne_inverse.py
+++ b/examples/plot_compute_mne_inverse.py
@@ -7,18 +7,16 @@ Compute dSPM inverse solution on MNE evoked dataset
 and stores the solution in stc files for visualisation.
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
 from mne.datasets import sample
 from mne import read_evokeds
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 
+print(__doc__)
 
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -47,10 +45,8 @@ plt.xlabel('time (ms)')
 plt.ylabel('%s value' % method)
 plt.show()
 
-# Plot brain in 3D with PySurfer if available. Note that the subject name
-# is already known by the SourceEstimate stc object.
-brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
-brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
+# Plot brain in 3D with PySurfer if available
+brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
 brain.show_view('lateral')
 
 # use peak getter to move vizualization to the time point of the peak
diff --git a/examples/plot_evoked_whitening.py b/examples/plot_evoked_whitening.py
deleted file mode 100644
index 98e4522..0000000
--- a/examples/plot_evoked_whitening.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-=============================================
-Whitening evoked data with a noise covariance
-=============================================
-
-Evoked data are loaded and then whitened using a given
-noise covariance matrix. It's an excellent
-quality check to see if baseline signals match the assumption
-of Gaussian whiten noise from which we expect values around
-and less than 2 standard deviations.
-
-"""
-# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-from mne import read_cov, whiten_evoked, pick_types, read_evokeds
-from mne.cov import regularize
-from mne.datasets import sample
-
-data_path = sample.data_path()
-
-fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
-cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
-
-# Reading
-evoked = read_evokeds(fname, condition=0, baseline=(None, 0), proj=True)
-noise_cov = read_cov(cov_fname)
-
-###############################################################################
-# Show result
-
-  # Pick channels to view
-picks = pick_types(evoked.info, meg=True, eeg=True, exclude='bads')
-evoked.plot(picks=picks)
-
-noise_cov = regularize(noise_cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1)
-
-evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
-
-# plot the whitened evoked data to see if baseline signals match the
-# assumption of Gaussian whiten noise from which we expect values around
-# and less than 2 standard deviations.
-evoked_white.plot(picks=picks, unit=False, hline=[-2, 2])
diff --git a/examples/plot_extract_events_from_raw.py b/examples/plot_extract_events_from_raw.py
index e4a7a2e..4d52daf 100644
--- a/examples/plot_extract_events_from_raw.py
+++ b/examples/plot_extract_events_from_raw.py
@@ -10,12 +10,12 @@ Then plot them to get an idea of the paradigm.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 
@@ -25,7 +25,7 @@ raw = Raw(fname)
 events = mne.find_events(raw, stim_channel='STI 014')
 
 # Writing events
-mne.write_events('events.fif', events)
+mne.write_events('sample_audvis_raw-eve.fif', events)
 
 for ind, before, after in events[:5]:
     print("At sample %d stim channel went from %d to %d"
diff --git a/examples/plot_from_raw_to_epochs_to_evoked.py b/examples/plot_from_raw_to_epochs_to_evoked.py
index a656bbe..c655cc6 100644
--- a/examples/plot_from_raw_to_epochs_to_evoked.py
+++ b/examples/plot_from_raw_to_epochs_to_evoked.py
@@ -13,66 +13,65 @@ data and then saved to disk.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
 # Set parameters
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
-event_id, tmin, tmax = 1, -0.2, 0.5
+tmin, tmax = -0.2, 0.5
+
+# Select events to extract epochs from.
+event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
 
 #   Setup for reading the raw data
 raw = io.Raw(raw_fname)
 events = mne.read_events(event_fname)
 
 #   Plot raw data
-fig = raw.plot(events=events)
+raw.plot(events=events, event_color={1: 'cyan', -1: 'lightgray'})
 
 #   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
 include = []  # or stim channels ['STI 014']
-raw.info['bads'] += ['EEG 053']  # bads + 1 more
+raw.info['bads'] = ['MEG 2443', 'EEG 053']  # set bads
 
-# pick EEG channels
-picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+# pick EEG and MEG channels
+picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
                        include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6),
                     preload=True)
 
-epochs.plot()
+# Plot epochs.
+epochs.plot(title='Auditory left/right')
+
+# Look at channels that caused dropped events, showing that the subject's
+# blinks were likely to blame for most epochs being dropped
+epochs.drop_bad_epochs()
+epochs.plot_drop_log(subject='sample')
 
-evoked = epochs.average()  # average epochs and get an Evoked dataset.
+# Average epochs and get evoked data corresponding to the left stimulation
+evoked = epochs['Left'].average()
 
 evoked.save('sample_audvis_eeg-ave.fif')  # save evoked data to disk
 
 ###############################################################################
 # View evoked response
-times = 1e3 * epochs.times  # time in miliseconds
-
-ch_max_name, latency = evoked.get_peak(mode='neg')
 
-import matplotlib.pyplot as plt
-evoked.plot()
+evoked.plot(gfp=True)
 
-plt.xlim([times[0], times[-1]])
-plt.xlabel('time (ms)')
-plt.ylabel('Potential (uV)')
-plt.title('EEG evoked potential')
-
-plt.axvline(latency * 1e3, color='red', 
-            label=ch_max_name, linewidth=2,
-            linestyle='--')
-plt.legend(loc='best')
+###############################################################################
+# Save evoked responses for different conditions to disk
 
-plt.show()
+# average epochs and get Evoked datasets
+evokeds = [epochs[cond].average() for cond in ['Left', 'Right']]
 
-# Look at channels that caused dropped events, showing that the subject's
-# blinks were likely to blame for most epochs being dropped
-epochs.drop_bad_epochs()
-epochs.plot_drop_log(subject='sample')
+# save evoked data to disk
+mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
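
Note on the hunk above: the '/'-separated keys in `event_id` enable
tag-based selection, which is what makes `epochs['Left']` work. A minimal
sketch of the behaviour:

    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax)
    left = epochs['Left']          # matches 'Auditory/Left' only
    auditory = epochs['Auditory']  # matches both conditions
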
diff --git a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
deleted file mode 100644
index a72272b..0000000
--- a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-====================================================================
-Extract epochs for multiple conditions, save evoked response to disk
-====================================================================
-
-This script shows how to read the epochs for multiple conditions from
-a raw file given a list of events. The epochs are averaged to produce
-evoked data and then saved to disk.
-
-"""
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Eric Larson <larson.eric.d at gmail.com>
-#          Denis Engemann <denis.engemann at gmail.com>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import mne
-from mne import io
-from mne.datasets import sample
-from mne.epochs import combine_event_ids
-data_path = sample.data_path()
-
-###############################################################################
-# Set parameters
-raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
-event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
-tmin = -0.2
-tmax = 0.5
-
-#   Setup for reading the raw data
-raw = io.Raw(raw_fname)
-events = mne.read_events(event_fname)
-
-#   Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
-include = []  # or stim channels ['STI 014']
-raw.info['bads'] += ['EEG 053']  # bads + 1 more
-
-# pick EEG channels
-picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
-                       include=include, exclude='bads')
-# Read epochs
-epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
-# Let's equalize the trial counts in each condition
-epochs.equalize_event_counts(['AudL', 'AudR', 'VisL', 'VisR'], copy=False)
-# Now let's combine some conditions
-combine_event_ids(epochs, ['AudL', 'AudR'], {'Auditory': 12}, copy=False)
-combine_event_ids(epochs, ['VisL', 'VisR'], {'Visual': 34}, copy=False)
-
-# average epochs and get Evoked datasets
-evokeds = [epochs[cond].average() for cond in ['Auditory', 'Visual']]
-
-# save evoked data to disk
-mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
-
-###############################################################################
-# View evoked response
-import matplotlib.pyplot as plt
-plt.clf()
-ax = plt.subplot(2, 1, 1)
-evokeds[0].plot(axes=ax)
-plt.title('EEG evoked potential, auditory trials')
-plt.ylabel('Potential (uV)')
-ax = plt.subplot(2, 1, 2)
-evokeds[1].plot(axes=ax)
-plt.title('EEG evoked potential, visual trials')
-plt.ylabel('Potential (uV)')
-plt.show()
diff --git a/examples/plot_read_forward.py b/examples/plot_read_forward.py
deleted file mode 100644
index 38b4613..0000000
--- a/examples/plot_read_forward.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-====================================================
-Read a forward operator and display sensitivity maps
-====================================================
-"""
-# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import mne
-from mne.datasets import sample
-data_path = sample.data_path()
-
-fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
-subjects_dir = data_path + '/subjects'
-
-fwd = mne.read_forward_solution(fname, surf_ori=True)
-leadfield = fwd['sol']['data']
-
-print("Leadfield size : %d x %d" % leadfield.shape)
-
-grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
-mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
-eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
-
-###############################################################################
-# Show gain matrix a.k.a. leadfield matrix with sensitivity map
-
-import matplotlib.pyplot as plt
-plt.matshow(leadfield[:, :500])
-plt.xlabel('sources')
-plt.ylabel('sensors')
-plt.title('Lead field matrix (500 dipoles only)')
-
-plt.figure()
-plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
-         bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'])
-plt.legend()
-plt.title('Normal orientation sensitivity')
-plt.show()
-
-args = dict(fmin=0.1, fmid=0.5, fmax=0.9, smoothing_steps=7)
-grad_map.plot(subject='sample', time_label='Gradiometer sensitivity',
-              subjects_dir=subjects_dir, **args)
diff --git a/examples/preprocessing/plot_corrmap_detection.py b/examples/preprocessing/plot_corrmap_detection.py
new file mode 100644
index 0000000..96ddf29
--- /dev/null
+++ b/examples/preprocessing/plot_corrmap_detection.py
@@ -0,0 +1,76 @@
+"""
+==========================================================
+Identify similar ICs across multiple datasets via CORRMAP
+==========================================================
+
+After fitting ICA to multiple data sets, CORRMAP [1]_
+automatically identifies similar ICs in all sets based
+on a manually selected template. These ICs can then be
+removed or further investigated.
+
+References
+----------
+.. [1] Viola FC, et al. Semi-automatic identification of independent components
+       representing EEG artifact. Clin Neurophysiol 2009, May; 120(5): 868-77.
+"""
+
+# Authors: Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+
+from mne.io import Raw
+from mne.preprocessing import ICA
+from mne.preprocessing.ica import corrmap
+from mne.datasets import sample
+
+print(__doc__)
+
+###############################################################################
+# Setup paths and prepare epochs data
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = Raw(raw_fname, preload=True)
+raw.filter(1, 30, method='iir')
+picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True, ecg=False,
+                       stim=False, exclude='bads')
+
+events = mne.find_events(raw, stim_channel='STI 014')
+event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
+reject = dict(eog=250e-6)
+tmin, tmax = -0.5, 0.75
+
+
+###############################################################################
+# 1) Fit ICA to all "subjects".
+# In a real-world case, this would instead be multiple subjects/data sets;
+# here we create artificial subsets.
+
+all_epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
+                        proj=False, picks=picks, baseline=(None, 0),
+                        preload=True, reject=None, verbose=False)
+
+all_epochs = [all_epochs[start:stop] for start, stop in
+              [(0, 100), (101, 200), (201, 300)]]
+
+icas = [ICA(n_components=20, random_state=1).fit(epochs)
+        for epochs in all_epochs]
+
+# 2) Use corrmap to identify the maps best corresponding
+#    to a pre-specified template across all subsets
+#    (or, in the real world, multiple participant data sets)
+
+template = (0, 0)
+fig_template, fig_detected = corrmap(icas, template=template, label="blinks",
+                                     show=True, threshold=.8)
+
+# 3) Zeroing the identified blink components for all data sets
+#    results in individually cleaned data sets. Specific components
+#    can be accessed using the labels_ attribute.
+
+for ica in icas:
+    print(ica.labels_)
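
To actually remove the template-matched components, the labels stored by
corrmap can feed ICA.apply. A minimal sketch, assuming ICA.apply returns
the cleaned instance and using the 'blinks' label set above:

    for ica, epochs in zip(icas, all_epochs):
        ica.exclude = ica.labels_['blinks']
        epochs_clean = ica.apply(epochs.copy())
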
diff --git a/examples/plot_define_target_events.py b/examples/preprocessing/plot_define_target_events.py
similarity index 100%
rename from examples/plot_define_target_events.py
rename to examples/preprocessing/plot_define_target_events.py
index 59ea00d..39e6e28 100644
--- a/examples/plot_define_target_events.py
+++ b/examples/preprocessing/plot_define_target_events.py
@@ -17,12 +17,14 @@ and 'slowly-processed' face stimuli.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import io
 from mne.event import define_target_events
 from mne.datasets import sample
+import matplotlib.pyplot as plt
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -81,8 +83,6 @@ early, late = [epochs[k].average() for k in event_id]
 ###############################################################################
 # View evoked response
 
-import matplotlib.pyplot as plt
-
 times = 1e3 * epochs.times  # time in milliseconds
 title = 'Evoked response followed by %s button press'
 
diff --git a/examples/preprocessing/plot_eog_artifact_histogram.py b/examples/preprocessing/plot_eog_artifact_histogram.py
index ea9e25c..72220c4 100644
--- a/examples/preprocessing/plot_eog_artifact_histogram.py
+++ b/examples/preprocessing/plot_eog_artifact_histogram.py
@@ -10,13 +10,16 @@ Compute the distribution of timing for EOG artifacts.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -37,7 +40,7 @@ event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
 epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
 
 # Get the stim channel data
-pick_ch = mne.pick_channels(epochs.ch_names, 'STI 014')[0]
+pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
 data = epochs.get_data()[:, pick_ch, :].astype(int)
 data = np.sum((data.astype(int) & 512) == 512, axis=0)
 
diff --git a/examples/plot_estimate_covariance_matrix_baseline.py b/examples/preprocessing/plot_estimate_covariance_matrix_baseline.py
similarity index 100%
rename from examples/plot_estimate_covariance_matrix_baseline.py
rename to examples/preprocessing/plot_estimate_covariance_matrix_baseline.py
index 11c5c99..d9dfb2e 100644
--- a/examples/plot_estimate_covariance_matrix_baseline.py
+++ b/examples/preprocessing/plot_estimate_covariance_matrix_baseline.py
@@ -12,12 +12,12 @@ a.k.a. baseline.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import io
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
diff --git a/examples/plot_estimate_covariance_matrix_raw.py b/examples/preprocessing/plot_estimate_covariance_matrix_raw.py
similarity index 93%
rename from examples/plot_estimate_covariance_matrix_raw.py
rename to examples/preprocessing/plot_estimate_covariance_matrix_raw.py
index 837b93b..efafce8 100644
--- a/examples/plot_estimate_covariance_matrix_raw.py
+++ b/examples/preprocessing/plot_estimate_covariance_matrix_raw.py
@@ -8,12 +8,12 @@ Estimate covariance matrix from a raw FIF file
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 from mne import io
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
 
@@ -29,7 +29,7 @@ picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
 reject = dict(eeg=80e-6, eog=150e-6)
 
 # Compute the covariance from the raw data
-cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
+cov = mne.compute_raw_covariance(raw, picks=picks, reject=reject)
 print(cov)
 
 ###############################################################################
diff --git a/examples/preprocessing/plot_find_ecg_artifacts.py b/examples/preprocessing/plot_find_ecg_artifacts.py
index bc3f653..13f84ae 100644
--- a/examples/preprocessing/plot_find_ecg_artifacts.py
+++ b/examples/preprocessing/plot_find_ecg_artifacts.py
@@ -10,13 +10,16 @@ Locate QRS component of ECG.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
diff --git a/examples/preprocessing/plot_find_eog_artifacts.py b/examples/preprocessing/plot_find_eog_artifacts.py
index e40af5f..6b884bc 100644
--- a/examples/preprocessing/plot_find_eog_artifacts.py
+++ b/examples/preprocessing/plot_find_eog_artifacts.py
@@ -10,13 +10,16 @@ Locate peaks of EOG to spot blinks and general EOG artifacts.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -31,7 +34,7 @@ eog_events = mne.preprocessing.find_eog_events(raw, event_id)
 
 # Read epochs
 picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
-                        exclude='bads')
+                       exclude='bads')
 tmin, tmax = -0.2, 0.2
 epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
 data = epochs.get_data()
diff --git a/examples/preprocessing/plot_ica_from_epochs.py b/examples/preprocessing/plot_ica_from_epochs.py
deleted file mode 100644
index ba7c43e..0000000
--- a/examples/preprocessing/plot_ica_from_epochs.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-================================
-Compute ICA components on epochs
-================================
-
-ICA is fit to MEG raw data.
-We assume that the non-stationary EOG artifacts have already been removed.
-The sources matching the ECG are automatically found and displayed.
-Subsequently, artefact detection and rejection quality are assessed.
-Finally, the impact on the evoked ERF is visualized.
-"""
-print(__doc__)
-
-# Authors: Denis Engemann <denis.engemann at gmail.com>
-#
-# License: BSD (3-clause)
-
-import numpy as np
-import mne
-from mne.io import Raw
-from mne.preprocessing import ICA, create_ecg_epochs
-from mne.datasets import sample
-
-###############################################################################
-# Setup paths and prepare epochs data
-
-data_path = sample.data_path()
-raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-
-raw = Raw(raw_fname, preload=True)
-raw.filter(1, 30, method='iir')
-picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, ecg=True,
-                       stim=False, exclude='bads')
-
-# longer + more epochs for more artifact exposure
-events = mne.find_events(raw, stim_channel='STI 014')
-event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
-reject = dict(eog=250e-6)
-tmin, tmax = -0.5, 0.5
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks,
-                    baseline=(None, 0), preload=True, reject=reject)
-
-###############################################################################
-# 1) Fit ICA model using the FastICA algorithm
-
-ica = ICA(n_components=0.95, method='fastica').fit(epochs)
-
-###############################################################################
-# 2) Find ECG Artifacts
-
-# generate ECG epochs to improve detection by correlation
-ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
-
-ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
-ica.plot_scores(scores, exclude=ecg_inds)
-
-title = 'Sources related to %s artifacts (red)'
-show_picks = np.abs(scores).argsort()[::-1][:5]
-
-ica.plot_sources(epochs, show_picks, exclude=ecg_inds, title=title % 'ecg')
-ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
-
-ica.exclude += ecg_inds[:3]  # by default we expect 3 reliable ECG components
-
-###############################################################################
-# 3) Assess component selection and unmixing quality
-
-# estimate average artifact
-ecg_evoked = ecg_epochs.average()
-ica.plot_sources(ecg_evoked)  # plot ECG sources + selection
-ica.plot_overlay(ecg_evoked)  # plot ECG cleaning
-
-# check effect on ERF of interest
-epochs.crop(-.2, None)  # crop to baseline of interest
-ica.plot_overlay(epochs['aud_l'].average())  # plot remaining left auditory ERF
diff --git a/examples/preprocessing/plot_interpolate_bad_channels.py b/examples/preprocessing/plot_interpolate_bad_channels.py
new file mode 100644
index 0000000..be6282d
--- /dev/null
+++ b/examples/preprocessing/plot_interpolate_bad_channels.py
@@ -0,0 +1,43 @@
+"""
+=============================================
+Interpolate bad channels for MEG/EEG channels
+=============================================
+
+This example shows how to interpolate bad MEG/EEG channels:
+
+    - Using spherical splines as described in [1]_ for EEG data.
+    - Using field interpolation for MEG data.
+
+The bad channels will still be marked as bad. Only the data in those channels
+is replaced by interpolated values.
+
+References
+----------
+.. [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989)
+       Spherical splines for scalp potential and current density mapping.
+       Electroencephalography and Clinical Neurophysiology, Feb; 72(2):184-7.
+"""
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne.datasets import sample
+
+print(__doc__)
+
+data_path = sample.data_path()
+
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+evoked = mne.read_evokeds(fname, condition='Left Auditory',
+                          baseline=(None, 0))
+
+# plot with bads
+evoked.plot(exclude=[])
+
+# compute interpolation (also works with Raw and Epochs objects)
+evoked.interpolate_bads(reset_bads=False)
+
+# plot interpolated (previous bads)
+evoked.plot(exclude=[])
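
Note on the example above: the `reset_bads` flag controls whether the
interpolated channels stay marked as bad. A minimal sketch of the two
behaviours:

    evoked.interpolate_bads(reset_bads=False)  # data replaced, still in 'bads'
    evoked.interpolate_bads(reset_bads=True)   # data replaced, 'bads' cleared
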
diff --git a/examples/preprocessing/plot_rereference_eeg.py b/examples/preprocessing/plot_rereference_eeg.py
new file mode 100644
index 0000000..dc0c2eb
--- /dev/null
+++ b/examples/preprocessing/plot_rereference_eeg.py
@@ -0,0 +1,65 @@
+"""
+=============================
+Re-referencing the EEG signal
+=============================
+
+Load raw data and apply some EEG referencing schemes.
+"""
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne.datasets import sample
+from matplotlib import pyplot as plt
+
+print(__doc__)
+
+# Setup for reading the raw data
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id, tmin, tmax = 1, -0.2, 0.5
+
+# Read the raw data
+raw = mne.io.Raw(raw_fname, preload=True)
+events = mne.read_events(event_fname)
+
+# The EEG channels will be plotted to visualize the difference in referencing
+# schemes.
+picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True, exclude='bads')
+
+###############################################################################
+# Apply different EEG referencing schemes and plot the resulting evokeds
+
+reject = dict(eeg=180e-6, eog=150e-6)
+epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+                     picks=picks, reject=reject)
+
+fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True)
+
+# No reference. This assumes that the EEG has already been referenced properly.
+# This explicitly prevents MNE from adding a default EEG reference.
+raw_no_ref, _ = mne.io.set_eeg_reference(raw, [])
+evoked_no_ref = mne.Epochs(raw_no_ref, **epochs_params).average()
+del raw_no_ref  # Free memory
+
+evoked_no_ref.plot(axes=ax1, titles=dict(eeg='EEG Original reference'))
+
+# Average reference. This is normally added by default, but can also be added
+# explicitly.
+raw_car, _ = mne.io.set_eeg_reference(raw)
+evoked_car = mne.Epochs(raw_car, **epochs_params).average()
+del raw_car
+
+evoked_car.plot(axes=ax2, titles=dict(eeg='EEG Average reference'))
+
+# Use the mean of channels EEG 001 and EEG 002 as a reference
+raw_custom, _ = mne.io.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
+evoked_custom = mne.Epochs(raw_custom, **epochs_params).average()
+del raw_custom
+
+evoked_custom.plot(axes=ax3, titles=dict(eeg='EEG Custom reference'))
+
+mne.viz.tight_layout()
diff --git a/examples/preprocessing/plot_resample.py b/examples/preprocessing/plot_resample.py
new file mode 100644
index 0000000..9d50066
--- /dev/null
+++ b/examples/preprocessing/plot_resample.py
@@ -0,0 +1,85 @@
+"""
+===============
+Resampling data
+===============
+
+When performing experiments where timing is critical, a signal with a high
+sampling rate is desired. However, having a signal with a much higher sampling
+rate than is necessary needlessly consumes memory and slows down computations
+operating on the data.
+
+This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold
+reduction in data size, at the cost of an equal loss of temporal resolution.
+"""
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#
+# License: BSD (3-clause)
+#
+from __future__ import print_function
+
+from matplotlib import pyplot as plt
+
+import mne
+from mne.io import Raw
+from mne.datasets import sample
+
+###############################################################################
+# Setting up data paths and loading raw data
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+raw = Raw(raw_fname, preload=True)
+
+###############################################################################
+# Since downsampling reduces the timing precision of events, we recommend
+# first extracting epochs and downsampling the Epochs object:
+events = mne.find_events(raw)
+epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
+
+# Downsample to 100 Hz
+print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
+epochs_resampled = epochs.resample(100, copy=True)
+print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
+
+# Plot a piece of data to see the effects of downsampling
+plt.figure(figsize=(7, 3))
+
+n_samples_to_plot = int(0.5 * epochs.info['sfreq'])  # plot 0.5 seconds of data
+plt.plot(epochs.times[:n_samples_to_plot],
+         epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
+
+n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
+plt.plot(epochs_resampled.times[:n_samples_to_plot],
+         epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
+         '-o', color='red')
+
+plt.xlabel('time (s)')
+plt.legend(['original', 'downsampled'], loc='best')
+plt.title('Effect of downsampling')
+mne.viz.tight_layout()
+
+
+###############################################################################
+# When resampling epochs is unwanted or impossible, for example when the data
+# doesn't fit into memory or your analysis pipeline doesn't involve epochs at
+# all, the alternative approach is to resample the continuous data. This
+# can also be done on non-preloaded data.
+
+# Resample to 300 Hz
+raw_resampled = raw.resample(300, copy=True)
+
+###############################################################################
+# Because resampling also affects the stim channels, some trigger onsets might
+# be lost in this case. While MNE attempts to downsample the stim channels in
+# an intelligent manner to avoid this, the recommended approach is to find
+# events on the original data before downsampling.
+print('Number of events before resampling:', len(mne.find_events(raw)))
+
+# Resample to 100 Hz (generates warning)
+raw_resampled = raw.resample(100, copy=True)
+print('Number of events after resampling:',
+      len(mne.find_events(raw_resampled)))
+
+# To avoid losing events, jointly resample the data and event matrix
+events = mne.find_events(raw)
+raw_resampled, events_resampled = raw.resample(100, events=events, copy=True)
+print('Number of events after resampling:', len(events_resampled))
diff --git a/examples/preprocessing/plot_run_ica.py b/examples/preprocessing/plot_run_ica.py
new file mode 100644
index 0000000..ab61561
--- /dev/null
+++ b/examples/preprocessing/plot_run_ica.py
@@ -0,0 +1,47 @@
+# doc:slow-example
+"""
+================================
+Compute ICA components on epochs
+================================
+
+ICA is fit to MEG raw data.
+We assume that the non-stationary EOG artifacts have already been removed.
+The sources matching the ECG are automatically found and displayed.
+Subsequently, artefact detection and rejection quality are assessed.
+Finally, the impact on the evoked ERF is visualized.
+
+Note that this example does quite a bit of processing, so even on a
+fast machine it can take about a minute to complete.
+"""
+
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne.io import Raw
+from mne.preprocessing import ICA, create_ecg_epochs
+from mne.datasets import sample
+
+print(__doc__)
+
+###############################################################################
+# Fit ICA model using the FastICA algorithm, detect and inspect components
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = Raw(raw_fname, preload=True)
+raw.filter(1, 30, method='iir')
+raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True)
+
+# longer + more epochs for more artifact exposure
+events = mne.find_events(raw, stim_channel='STI 014')
+epochs = mne.Epochs(raw, events, event_id=None, tmin=-0.2, tmax=0.5)
+
+ica = ICA(n_components=0.95, method='fastica').fit(epochs)
+
+ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
+ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
+
+ica.plot_components(ecg_inds)
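
Note on the example above: passing a float to ICA's `n_components` selects
the number of PCA components needed to explain that fraction of the
variance before the ICA fit. A minimal sketch of the two forms:

    ica = ICA(n_components=0.95, method='fastica')  # 95% explained variance
    ica = ICA(n_components=20, method='fastica')    # exactly 20 components
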
diff --git a/examples/plot_shift_evoked.py b/examples/preprocessing/plot_shift_evoked.py
similarity index 93%
rename from examples/plot_shift_evoked.py
rename to examples/preprocessing/plot_shift_evoked.py
index 6b8fe5d..aa30fdb 100644
--- a/examples/plot_shift_evoked.py
+++ b/examples/preprocessing/plot_shift_evoked.py
@@ -8,13 +8,13 @@ Shifting time-scale in evoked data
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
 import mne
 from mne.viz import tight_layout
 from mne.datasets import sample
 
+print(__doc__)
+
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
@@ -25,7 +25,7 @@ evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
                           proj=True)
 
 ch_names = evoked.info['ch_names']
-picks = mne.pick_channels(ch_names=ch_names, include="MEG 2332", exclude="bad")
+picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
 
 # Create subplots
 f, (ax1, ax2, ax3) = plt.subplots(3)
diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/plot_virtual_evoked.py
new file mode 100644
index 0000000..3c24364
--- /dev/null
+++ b/examples/preprocessing/plot_virtual_evoked.py
@@ -0,0 +1,39 @@
+"""
+=======================
+Remap MEG channel types
+=======================
+
+In this example, MEG data are remapped from one
+channel type to another. This is useful to:
+
+    - visualize combined magnetometers and gradiometers as magnetometers
+      or gradiometers.
+    - run statistics from both magnetometers and gradiometers while
+      working with a single channel type.
+"""
+
+# Author: Mainak Jas <mainak.jas at telecom-paristech.fr>
+
+# License: BSD (3-clause)
+
+import mne
+from mne.datasets import sample
+
+print(__doc__)
+
+# read the evoked
+data_path = sample.data_path()
+fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))
+
+# go from grad + mag to mag
+virt_evoked = evoked.as_type('mag')
+evoked.plot_topomap(ch_type='mag', title='mag (original)')
+virt_evoked.plot_topomap(ch_type='mag',
+                         title='mag (interpolated from mag + grad)')
+
+# go from grad + mag to grad
+virt_evoked = evoked.as_type('grad')
+evoked.plot_topomap(ch_type='grad', title='grad (original)')
+virt_evoked.plot_topomap(ch_type='grad',
+                         title='grad (interpolated from mag + grad)')
diff --git a/examples/preprocessing/plot_xdawn_denoising.py b/examples/preprocessing/plot_xdawn_denoising.py
new file mode 100644
index 0000000..76b7ce8
--- /dev/null
+++ b/examples/preprocessing/plot_xdawn_denoising.py
@@ -0,0 +1,80 @@
+"""
+================
+ XDAWN Denoising
+================
+
+XDAWN filters are trained from epochs; the signal is projected into the
+source space and then projected back into the sensor space using only the
+first two XDAWN components. The process is similar to ICA, but it is
+supervised in order to maximize the signal to signal-plus-noise ratio of
+the evoked response.
+
+WARNING: As this denoising method exploits the known events to
+maximize the SNR of the contrast between conditions, it can lead to
+overfitting. To avoid biasing the statistical analysis, the epochs used
+to fit should be disjoint from those used in the apply method.
+
+References
+----------
+[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
+algorithm to enhance evoked potentials: application to brain-computer
+interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
+
+[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
+August). Theoretical analysis of xDAWN algorithm: application to an
+efficient sensor selection in a P300 BCI. In Signal Processing Conference,
+2011 19th European (pp. 1382-1386). IEEE.
+"""
+
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+
+from mne import (io, compute_raw_covariance, read_events, pick_types,
+                 Epochs)
+from mne.datasets import sample
+from mne.preprocessing import Xdawn
+from mne.viz import plot_epochs_image
+
+print(__doc__)
+
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters and read data
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+tmin, tmax = -0.1, 0.3
+event_id = dict(vis_r=4)
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 20, method='iir')  # replace baselining with high-pass
+events = read_events(event_fname)
+
+raw.info['bads'] = ['MEG 2443']  # set bad channels
+picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
+                   exclude='bads')
+# Epoching
+epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
+                picks=picks, baseline=None, preload=True,
+                add_eeg_ref=False, verbose=False)
+
+# Plot image epoch before xdawn
+plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
+
+# Estimates signal covariance
+signal_cov = compute_raw_covariance(raw, picks=picks)
+
+# Xdawn instance
+xd = Xdawn(n_components=2, signal_cov=signal_cov)
+
+# Fit xdawn
+xd.fit(epochs)
+
+# Denoise epochs
+epochs_denoised = xd.apply(epochs)
+
+# Plot image epoch after xdawn
+plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
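
Following the warning in the docstring above, fitting and applying on
disjoint epochs avoids overfitting. A minimal sketch, assuming slice
indexing of Epochs and the dict return of Xdawn.apply used in the example:

    xd.fit(epochs[::2])                # fit on even-numbered epochs
    denoised = xd.apply(epochs[1::2])  # denoise the held-out half
    plot_epochs_image(denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
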
diff --git a/examples/realtime/ftclient_rt_average.py b/examples/realtime/ftclient_rt_average.py
index 3465ed7..8f0985a 100644
--- a/examples/realtime/ftclient_rt_average.py
+++ b/examples/realtime/ftclient_rt_average.py
@@ -26,18 +26,17 @@ measurement info from the Fieldtrip Header object.
 Together with RtEpochs, this can be used to compute evoked
 responses using moving averages.
 """
-
-print(__doc__)
-
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 #
 # License: BSD (3-clause)
 
+import matplotlib.pyplot as plt
+
 import mne
 from mne.viz import plot_events
 from mne.realtime import FieldTripClient, RtEpochs
 
-import matplotlib.pyplot as plt
+print(__doc__)
 
 # select the left-auditory condition
 event_id, tmin, tmax = 1, -0.2, 0.5
@@ -71,11 +70,13 @@ with FieldTripClient(host='localhost', port=1972,
     for ii, ev in enumerate(rt_epochs.iter_evoked()):
         print("Just got epoch %d" % (ii + 1))
 
-        if ii > 0:
-            ev += evoked
-        evoked = ev
+        if ii == 0:
+            evoked = ev
+        else:
+            evoked += ev
 
-        ax[0].cla(), ax[1].cla()  # clear axis
+        ax[0].cla()
+        ax[1].cla()  # clear axis
 
         plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],
                     first_samp=-rt_client.tmin_samp, axes=ax[0])
diff --git a/examples/realtime/ftclient_rt_compute_psd.py b/examples/realtime/ftclient_rt_compute_psd.py
new file mode 100644
index 0000000..22c1095
--- /dev/null
+++ b/examples/realtime/ftclient_rt_compute_psd.py
@@ -0,0 +1,74 @@
+"""
+==============================================================
+Compute real-time power spectrum density with FieldTrip client
+==============================================================
+
+Please refer to `ftclient_rt_average.py` for instructions on
+how to get the FieldTrip connector working in MNE-Python.
+
+This example demonstrates how to use it for continuous
+computation of power spectra in real-time using the
+get_data_as_epoch function.
+
+"""
+# Author: Mainak Jas <mainak at neuro.hut.fi>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+import mne
+from mne.realtime import FieldTripClient
+from mne.time_frequency import compute_epochs_psd
+
+print(__doc__)
+
+# The user must provide the list of bad channels because
+# the FieldTrip header object does not contain that information.
+bads = ['MEG 2443', 'EEG 053']
+
+fig, ax = plt.subplots(1)
+with FieldTripClient(host='localhost', port=1972,
+                     tmax=150, wait_max=10) as rt_client:
+
+    # get measurement info guessed by MNE-Python
+    raw_info = rt_client.get_measurement_info()
+
+    # select gradiometers
+    picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
+                           stim=False, include=[], exclude=bads)
+
+    n_fft = 256  # the FFT size. Ideally a power of 2
+    n_samples = 2048  # time window on which to compute FFT
+    for ii in range(20):
+        epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
+        psd, freqs = compute_epochs_psd(epoch, fmin=2, fmax=200, n_fft=n_fft)
+
+        cmap = 'RdBu_r'
+        freq_mask = freqs < 150
+        freqs = freqs[freq_mask]
+        log_psd = 10 * np.log10(psd[0])
+
+        tmin = epoch.events[0][0] / raw_info['sfreq']
+        tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
+
+        if ii == 0:
+            im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
+                           origin='lower', cmap=cmap)
+
+            ax.set_yticks(np.arange(0, len(freqs), 10))
+            ax.set_yticklabels(freqs[::10].round(1))
+            ax.set_ylabel('Frequency (Hz)')
+            ax.set_xticks(np.arange(0, len(picks), 30))
+            ax.set_xticklabels(picks[::30])
+            ax.set_xlabel('MEG channel index')
+            im.set_clim()
+        else:
+            im.set_data(log_psd[:, freq_mask].T)
+
+        plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
+                  % (tmin, tmax), fontsize=10)
+
+        plt.pause(0.5)
+plt.close()
diff --git a/examples/realtime/plot_compute_rt_average.py b/examples/realtime/plot_compute_rt_average.py
index 50ba72c..7f6d779 100644
--- a/examples/realtime/plot_compute_rt_average.py
+++ b/examples/realtime/plot_compute_rt_average.py
@@ -11,7 +11,6 @@ Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
 has to be running on the same computer.
 """
 
-print(__doc__)
 
 # Authors: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Mainak Jas <mainak at neuro.hut.fi>
@@ -19,10 +18,13 @@ print(__doc__)
 # License: BSD (3-clause)
 
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.realtime import RtEpochs, MockRtClient
 
+print(__doc__)
+
 # Fiff file to simulate the realtime client
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
@@ -49,9 +51,10 @@ rt_epochs.start()
 rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
 for ii, ev in enumerate(rt_epochs.iter_evoked()):
     print("Just got epoch %d" % (ii + 1))
-    if ii > 0:
-        ev += evoked
-    evoked = ev
-    plt.clf() # clear canvas
+    if ii == 0:
+        evoked = ev
+    else:
+        evoked += ev
+    plt.clf()  # clear canvas
     evoked.plot(axes=plt.gca())  # plot on current figure
     plt.pause(0.05)
diff --git a/examples/realtime/plot_compute_rt_decoder.py b/examples/realtime/plot_compute_rt_decoder.py
index 61e566d..753728d 100644
--- a/examples/realtime/plot_compute_rt_decoder.py
+++ b/examples/realtime/plot_compute_rt_decoder.py
@@ -11,16 +11,14 @@ accuracy is plotted
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
-import time
+import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
 from mne.realtime import MockRtClient, RtEpochs
 from mne.datasets import sample
 
-import numpy as np
-import matplotlib.pyplot as plt
+print(__doc__)
 
 # Fiff file to simulate the realtime client
 data_path = sample.data_path()
@@ -53,32 +51,35 @@ rt_client.send_data(rt_epochs, picks, tmin=0, tmax=90, buffer_size=1000)
 # Decoding in sensor space using a linear SVM
 n_times = len(rt_epochs.times)
 
-from sklearn import preprocessing
-from sklearn.svm import SVC
-from sklearn.pipeline import Pipeline
-from sklearn.cross_validation import cross_val_score, ShuffleSplit
+from sklearn import preprocessing  # noqa
+from sklearn.svm import SVC  # noqa
+from sklearn.pipeline import Pipeline  # noqa
+from sklearn.cross_validation import cross_val_score, ShuffleSplit  # noqa
+from mne.decoding import EpochsVectorizer, FilterEstimator  # noqa
 
-from mne.decoding import ConcatenateChannels, FilterEstimator
 
 scores_x, scores, std_scores = [], [], []
 
 filt = FilterEstimator(rt_epochs.info, 1, 40)
 scaler = preprocessing.StandardScaler()
-concatenator = ConcatenateChannels()
+vectorizer = EpochsVectorizer()
 clf = SVC(C=1, kernel='linear')
 
-concat_classifier = Pipeline([('filter', filt), ('concat', concatenator),
+concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
                               ('scaler', scaler), ('svm', clf)])
 
+data_picks = mne.pick_types(rt_epochs.info, meg='grad', eeg=False, eog=True,
+                            stim=False, exclude=raw.info['bads'])
+
 for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
 
     print("Just got epoch %d" % (ev_num + 1))
 
     if ev_num == 0:
-        X = ev.data[None, ...]
-        y = int(ev.comment)
+        X = ev.data[None, data_picks, :]
+        y = int(ev.comment)  # the comment attribute contains the event_id
     else:
-        X = np.concatenate((X, ev.data[None, ...]), axis=0)
+        X = np.concatenate((X, ev.data[None, data_picks, :]), axis=0)
         y = np.append(y, int(ev.comment))
 
     if ev_num >= min_trials:
@@ -94,18 +95,19 @@ for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
         # Plot accuracy
         plt.clf()
 
-        plt.plot(scores_x[-5:], scores[-5:], '+', label="Classif. score")
+        plt.plot(scores_x, scores, '+', label="Classif. score")
         plt.hold(True)
-        plt.plot(scores_x[-5:], scores[-5:])
+        plt.plot(scores_x, scores)
         plt.axhline(50, color='k', linestyle='--', label="Chance level")
-        hyp_limits = (np.asarray(scores[-5:]) - np.asarray(std_scores[-5:]),
-                      np.asarray(scores[-5:]) + np.asarray(std_scores[-5:]))
-        plt.fill_between(scores_x[-5:], hyp_limits[0], y2=hyp_limits[1],
+        hyp_limits = (np.asarray(scores) - np.asarray(std_scores),
+                      np.asarray(scores) + np.asarray(std_scores))
+        plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1],
                          color='b', alpha=0.5)
         plt.xlabel('Trials')
         plt.ylabel('Classification score (% correct)')
+        plt.xlim([min_trials, 50])
         plt.ylim([30, 105])
         plt.title('Real-time decoding')
-        plt.show()
-
-        time.sleep(0.1)
+        plt.show(block=False)
+        plt.pause(0.01)
+plt.show()
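
The cross-validated scoring that fills scores/std_scores happens above this
hunk and is not shown; a hedged, self-contained sketch of the usual pattern
with the imports from this file and random stand-in data (the actual lines
in the example may differ):

    import numpy as np
    from sklearn.svm import SVC
    from sklearn.cross_validation import cross_val_score, ShuffleSplit

    # stand-in data; in the example, X and y are built from rt_epochs
    rng = np.random.RandomState(42)
    X, y = rng.randn(30, 40), rng.randint(0, 2, 30)

    cv = ShuffleSplit(len(y), n_iter=5, test_size=0.2, random_state=42)
    scores_t = cross_val_score(SVC(C=1, kernel='linear'), X, y,
                               cv=cv, n_jobs=1) * 100  # percent correct
    print(scores_t.mean(), scores_t.std())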
diff --git a/examples/realtime/rt_feedback_client.py b/examples/realtime/rt_feedback_client.py
index 1362f21..2937a4d 100644
--- a/examples/realtime/rt_feedback_client.py
+++ b/examples/realtime/rt_feedback_client.py
@@ -24,9 +24,6 @@ to test. However, it should be possible to adapt this script
 for a real experiment.
 
 """
-
-print(__doc__)
-
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 #
 # License: BSD (3-clause)
@@ -34,6 +31,8 @@ print(__doc__)
 from mne.realtime import StimClient
 from psychopy import visual, core
 
+print(__doc__)
+
 # Instantiating stimulation client
 
 # Port number must match port number used to instantiate
diff --git a/examples/realtime/rt_feedback_server.py b/examples/realtime/rt_feedback_server.py
index 0a188ea..292a32b 100644
--- a/examples/realtime/rt_feedback_server.py
+++ b/examples/realtime/rt_feedback_server.py
@@ -24,30 +24,29 @@ to test. However, it should be possible to adapt this script
 for a real experiment.
 
 """
-
-print(__doc__)
-
 # Author: Mainak Jas <mainak at neuro.hut.fi>
 #
 # License: BSD (3-clause)
 
 import time
-import mne
 
 import numpy as np
 import matplotlib.pyplot as plt
 
-from mne.datasets import sample
-from mne.realtime import StimServer
-from mne.realtime import MockRtClient
-from mne.decoding import ConcatenateChannels, FilterEstimator
-
 from sklearn import preprocessing
 from sklearn.svm import SVC
 from sklearn.pipeline import Pipeline
 from sklearn.cross_validation import train_test_split
 from sklearn.metrics import confusion_matrix
 
+import mne
+from mne.datasets import sample
+from mne.realtime import StimServer
+from mne.realtime import MockRtClient
+from mne.decoding import EpochsVectorizer, FilterEstimator
+
+print(__doc__)
+
 # Load fiff file to simulate data
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
@@ -67,10 +66,10 @@ with StimServer('localhost', port=4218) as stim_server:
     # Constructing the pipeline for classification
     filt = FilterEstimator(raw.info, 1, 40)
     scaler = preprocessing.StandardScaler()
-    concatenator = ConcatenateChannels()
+    vectorizer = EpochsVectorizer()
     clf = SVC(C=1, kernel='linear')
 
-    concat_classifier = Pipeline([('filter', filt), ('concat', concatenator),
+    concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
                                   ('scaler', scaler), ('svm', clf)])
 
     stim_server.start(verbose=True)
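
EpochsVectorizer, which replaces ConcatenateChannels here, flattens each
epoch's channels-by-times matrix into a single feature vector so standard
scikit-learn estimators can consume it. A NumPy sketch of the reshaping it
performs conceptually (not the MNE source):

    import numpy as np

    X3d = np.zeros((20, 30, 100))    # (n_epochs, n_channels, n_times)
    X2d = X3d.reshape(len(X3d), -1)  # (n_epochs, n_channels * n_times)
    assert X2d.shape == (20, 3000)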
diff --git a/examples/simulation/README.txt b/examples/simulation/README.txt
new file mode 100644
index 0000000..bec6bcc
--- /dev/null
+++ b/examples/simulation/README.txt
@@ -0,0 +1,5 @@
+
+Data Simulation
+---------------
+
+Tools to generate simulated data.
diff --git a/examples/plot_simulate_evoked_data.py b/examples/simulation/plot_simulate_evoked_data.py
similarity index 58%
rename from examples/plot_simulate_evoked_data.py
rename to examples/simulation/plot_simulate_evoked_data.py
index 6fbed34..ef4c5ca 100644
--- a/examples/plot_simulate_evoked_data.py
+++ b/examples/simulation/plot_simulate_evoked_data.py
@@ -9,19 +9,18 @@ Generate simulated evoked data
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
 
 from mne import (read_proj, read_forward_solution, read_cov, read_label,
-                 pick_types_evoked, pick_types_forward, pick_types,
-                 read_evokeds)
-from mne.io import Raw
+                 pick_types_forward, pick_types)
+from mne.io import Raw, read_info
 from mne.datasets import sample
-from mne.time_frequency import iir_filter_raw, morlet
+from mne.time_frequency import fit_iir_model_raw
 from mne.viz import plot_sparse_source_estimates
-from mne.simulation import generate_sparse_stc, generate_evoked
+from mne.simulation import simulate_sparse_stc, simulate_evoked
+
+print(__doc__)
 
 ###############################################################################
 # Load real data as templates
@@ -38,45 +37,34 @@ cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
 
 fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
 fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
-
 cov = read_cov(cov_fname)
-
-condition = 'Left Auditory'
-evoked_template = read_evokeds(ave_fname, condition=condition, baseline=None)
-evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
-                                    exclude=raw.info['bads'])
+info = read_info(ave_fname)
 
 label_names = ['Aud-lh', 'Aud-rh']
 labels = [read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
           for ln in label_names]
 
 ###############################################################################
-# Generate source time courses and the correspond evoked data
-snr = 6  # dB
-tmin = -0.1
-sfreq = 1000.  # Hz
-tstep = 1. / sfreq
-n_samples = 600
-times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
-
-# Generate times series from 2 Morlet wavelets
-stc_data = np.zeros((len(labels), len(times)))
-Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
-stc_data[0][:len(Ws[0])] = np.real(Ws[0])
-stc_data[1][:len(Ws[1])] = np.real(Ws[1])
-stc_data *= 100 * 1e-9  # use nAm as unit
-
-# time translation
-stc_data[1] = np.roll(stc_data[1], 80)
-stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
-                          random_state=0)
+# Generate source time courses from 2 dipoles and the corresponding evoked data
+
+times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
+rng = np.random.RandomState(42)
+
+
+def data_fun(times):
+    """Function to generate random source time courses"""
+    return (1e-9 * np.sin(30. * times) *
+            np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
+
+stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
+                          random_state=42, labels=labels, data_fun=data_fun)
 
 ###############################################################################
 # Generate noisy evoked data
 picks = pick_types(raw.info, meg=True, exclude='bads')
-iir_filter = iir_filter_raw(raw, order=5, picks=picks, tmin=60, tmax=180)
-evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
-                         tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
+snr = 6.  # dB
+evoked = simulate_evoked(fwd, stc, info, cov, snr, iir_filter=iir_filter)
 
 ###############################################################################
 # Plot
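
The data_fun callback introduced above defines the simulated source
waveform: simulate_sparse_stc calls it with the times vector (in seconds)
for each dipole, and it must return one amplitude time course in Am. A
sketch of an alternative, hypothetical callback under the same contract:

    import numpy as np

    def gamma_burst(times):
        """Hypothetical data_fun: a 40 Hz burst under a Gaussian envelope."""
        envelope = np.exp(-(times - 0.1) ** 2 / (2 * 0.02 ** 2))
        return 10e-9 * np.sin(2. * np.pi * 40. * times) * envelope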
diff --git a/examples/simulation/plot_simulate_raw_data.py b/examples/simulation/plot_simulate_raw_data.py
new file mode 100644
index 0000000..d2d454b
--- /dev/null
+++ b/examples/simulation/plot_simulate_raw_data.py
@@ -0,0 +1,79 @@
+"""
+===========================
+Generate simulated raw data
+===========================
+
+This example generates raw data by repeating a desired source
+activation multiple times.
+
+"""
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Mark Wronkiewicz <wronk.mark at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from mne import read_source_spaces, find_events, Epochs, compute_covariance
+from mne.io import Raw
+from mne.datasets import sample
+from mne.simulation import simulate_sparse_stc, simulate_raw
+
+print(__doc__)
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
+src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
+bem_fname = (data_path +
+             '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
+
+# Load real data as the template
+raw = Raw(raw_fname).crop(0., 30., copy=False)  # 30 sec is enough
+
+##############################################################################
+# Generate dipole time series
+n_dipoles = 4  # number of dipoles to create
+epoch_duration = 2.  # duration of each epoch/event
+n = 0  # harmonic number
+
+
+def data_fun(times):
+    """Generate time-staggered sinusoids at harmonics of 10 Hz"""
+    global n
+    n_samp = len(times)
+    window = np.zeros(n_samp)
+    start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
+                   for ii in (2 * n, 2 * n + 1)]
+    window[start:stop] = 1.
+    n += 1
+    data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
+    data *= window
+    return data
+
+times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
+src = read_source_spaces(src_fname)
+stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
+                          data_fun=data_fun, random_state=0)
+# look at our source data
+fig, ax = plt.subplots(1)
+ax.plot(times, 1e9 * stc.data.T)
+ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
+fig.show()
+
+##############################################################################
+# Simulate raw data
+raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
+                       iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
+                       n_jobs=2, verbose=True)
+raw_sim.plot()
+
+##############################################################################
+# Plot evoked data
+events = find_events(raw_sim)  # only 1 pos, so event number == 1
+epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
+cov = compute_covariance(epochs, tmax=0., method='empirical')  # quick calc
+evoked = epochs.average()
+evoked.plot_white(cov)
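
The global counter in data_fun works but is easy to misuse (calling the
function again without resetting n shifts the harmonics). A sketch of the
same logic wrapped in a closure, so the counter state travels with the
function:

    import itertools
    import numpy as np

    def make_data_fun(n_dipoles):
        """Return a data_fun with its own call counter (a sketch)."""
        counter = itertools.count()

        def data_fun(times):
            n = next(counter)  # 0, 1, 2, ... one call per dipole
            n_samp = len(times)
            window = np.zeros(n_samp)
            start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
                           for ii in (2 * n, 2 * n + 1)]
            window[start:stop] = 1.
            return 25e-9 * np.sin(2. * np.pi * 10. * (n + 1) * times) * window

        return data_fun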
diff --git a/examples/stats/plot_cluster_stats_evoked.py b/examples/stats/plot_cluster_stats_evoked.py
index c8d1156..4f31e88 100644
--- a/examples/stats/plot_cluster_stats_evoked.py
+++ b/examples/stats/plot_cluster_stats_evoked.py
@@ -8,18 +8,19 @@ between conditions. Multiple comparison problem is addressed
 with cluster level permutation test.
 
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.stats import permutation_cluster_test
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -58,14 +59,12 @@ condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array
 # Compute statistic
 threshold = 6.0
 T_obs, clusters, cluster_p_values, H0 = \
-                permutation_cluster_test([condition1, condition2],
-                            n_permutations=1000, threshold=threshold, tail=1,
-                            n_jobs=2)
+    permutation_cluster_test([condition1, condition2], n_permutations=1000,
+                             threshold=threshold, tail=1, n_jobs=2)
 
 ###############################################################################
 # Plot
 times = epochs1.times
-import matplotlib.pyplot as plt
 plt.close('all')
 plt.subplot(211)
 plt.title('Channel : ' + channel)
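
For 1D data like this single channel, permutation_cluster_test returns each
cluster as a tuple holding one slice along the time axis; the significant
clusters are typically highlighted as in the following sketch (the actual
plotting lines sit below this hunk, so the exact code is an assumption):

    for i_c, c in enumerate(clusters):
        c = c[0]  # unpack the (slice,) tuple for 1D data
        if cluster_p_values[i_c] <= 0.05:
            plt.axvspan(times[c.start], times[c.stop - 1],
                        color='r', alpha=0.3)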
diff --git a/examples/stats/plot_fdr_stats_evoked.py b/examples/stats/plot_fdr_stats_evoked.py
index bda24d4..86fbcf1 100644
--- a/examples/stats/plot_fdr_stats_evoked.py
+++ b/examples/stats/plot_fdr_stats_evoked.py
@@ -8,20 +8,21 @@ Multiple comparison problem is addressed with
 False Discovery Rate (FDR) correction.
 
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 from scipy import stats
+import matplotlib.pyplot as plt
+
 import mne
 from mne import io
 from mne.datasets import sample
 from mne.stats import bonferroni_correction, fdr_correction
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -65,7 +66,6 @@ threshold_fdr = np.min(np.abs(T)[reject_fdr])
 # Plot
 times = 1e3 * epochs.times
 
-import matplotlib.pyplot as plt
 plt.close('all')
 plt.plot(times, T, 'k', label='T-stat')
 xmin, xmax = plt.xlim()
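
Both correction functions imported above share one call pattern, returning
a boolean rejection mask and corrected p-values. A minimal sketch on a
hypothetical p-value array:

    import numpy as np
    from mne.stats import bonferroni_correction, fdr_correction

    pvals = np.random.RandomState(0).uniform(size=100)
    reject_bonf, pval_bonf = bonferroni_correction(pvals, alpha=0.05)
    reject_fdr, pval_fdr = fdr_correction(pvals, alpha=0.05, method='indep')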
diff --git a/examples/stats/plot_linear_regression_raw.py b/examples/stats/plot_linear_regression_raw.py
new file mode 100644
index 0000000..d4393e3
--- /dev/null
+++ b/examples/stats/plot_linear_regression_raw.py
@@ -0,0 +1,67 @@
+"""
+========================================
+Regression on continuous data (rER[P/F])
+========================================
+
+This example demonstrates how rERPs (regressing the continuous data)
+generalise traditional averaging. If all preprocessing steps are the
+same, no overlap between epochs exists, and all predictors are binary,
+regression is virtually identical to traditional averaging.
+If overlap exists and/or predictors are continuous, traditional
+averaging is inapplicable, but regression can still estimate effects,
+including those of continuous predictors.
+
+Note. This example is based on new code which may not yet be
+memory-optimized. Be careful when running it on a machine with little
+memory.
+
+rERPs are described in:
+Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
+waveforms: II. Non-linear effects, overlap correction, and practical
+considerations. Psychophysiology, 52(2), 169-189.
+"""
+# Authors: Jona Sassenhagen <jona.sassenhagen at gmail.de>
+#
+# License: BSD (3-clause)
+
+import matplotlib.pyplot as plt
+
+import mne
+from mne.datasets import spm_face
+from mne.stats.regression import linear_regression_raw
+
+# Preprocess data
+data_path = spm_face.data_path()
+# Load and filter data, set up epochs
+raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw.fif'
+
+raw = mne.io.Raw(raw_fname, preload=True)  # Take first run
+
+picks = mne.pick_types(raw.info, meg=True, exclude='bads')
+raw.filter(1, 45, method='iir')
+
+events = mne.find_events(raw, stim_channel='UPPT001')
+event_id = dict(faces=1, scrambled=2)
+tmin, tmax = -.1, .5
+
+raw.pick_types(meg=True)
+
+# regular epoching
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
+                    baseline=None, preload=True, verbose=False, decim=4)
+
+# rERF
+evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
+                                reject=None, tmin=tmin, tmax=tmax,
+                                decim=4)
+# linear_regression_raw returns a dict of evokeds
+# select conditions similarly to mne.Epochs objects
+
+# plot both results
+cond = "faces"
+fig, (ax1, ax2) = plt.subplots(1, 2)
+epochs[cond].average().plot(axes=ax1, show=False)
+evokeds[cond].plot(axes=ax2, show=False)
+ax1.set_title("Traditional averaging")
+ax2.set_title("rERF")
+plt.show()
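
The key idea behind linear_regression_raw is that the continuous recording
is modeled as a sum of time-lagged event responses estimated jointly by
least squares, which is why overlapping epochs are handled correctly. A
self-contained toy illustration of that principle in plain NumPy (not the
MNE implementation):

    import numpy as np

    rng = np.random.RandomState(0)
    n_samp, n_lags = 1000, 30
    onsets = np.sort(rng.choice(n_samp - n_lags, 40, replace=False))
    kernel = np.hanning(n_lags)  # the "true" ERP shape

    # one design-matrix column per time lag after an event onset
    design = np.zeros((n_samp, n_lags))
    for lag in range(n_lags):
        design[onsets + lag, lag] = 1.

    data = design.dot(kernel) + 0.1 * rng.randn(n_samp)
    erp_hat = np.linalg.lstsq(design, data)[0]
    # erp_hat approximates `kernel` even when events overlap in time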
diff --git a/examples/stats/plot_sensor_permutation_test.py b/examples/stats/plot_sensor_permutation_test.py
index 51614cf..5aae6bc 100644
--- a/examples/stats/plot_sensor_permutation_test.py
+++ b/examples/stats/plot_sensor_permutation_test.py
@@ -8,13 +8,10 @@ during a fixed time window of interest. Here computation
 is performed on MNE sample dataset between 40 and 60 ms.
 
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 
 import mne
@@ -22,6 +19,8 @@ from mne import io
 from mne.stats import permutation_t_test
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -69,7 +68,7 @@ evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis],
 stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names)
 mask = p_values[:, np.newaxis] <= 0.05
 
-evoked.plot_topomap(ch_type='grad', times=[0], scale=1, time_format=None,
-                    cmap='Reds', vmin=0., vmax=np.max,
-                    unit='-log10(p)', format='-%0.1f', mask=mask,
+evoked.plot_topomap(ch_type='grad', times=[0], scale=1,
+                    time_format=None, cmap='Reds', vmin=0., vmax=np.max,
+                    unit='-log10(p)', cbar_fmt='-%0.1f', mask=mask,
                     size=3, show_names=lambda x: x[4:] + ' ' * 20)
diff --git a/examples/stats/plot_sensor_regression.py b/examples/stats/plot_sensor_regression.py
index 26abe37..a28cd2e 100644
--- a/examples/stats/plot_sensor_regression.py
+++ b/examples/stats/plot_sensor_regression.py
@@ -1,7 +1,7 @@
 """
-====================================================================
+=====================================
 Sensor space least squares regression
-====================================================================
+=====================================
 
 Predict single trial activity from a continuous variable.
 A single-trial regression is performed in each sensor and timepoint
@@ -21,14 +21,14 @@ revealed by linear regression analysis of ERP data. Neuroimage.)
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 
 import mne
 from mne.datasets import sample
 from mne.stats.regression import linear_regression
 
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -65,15 +65,12 @@ lm = linear_regression(epochs, design_matrix, names)
 
 
 def plot_topomap(x, unit):
-    x.plot_topomap(ch_type='mag', scale=1, size=1.5, vmax=np.max, unit=unit,
-                   times=np.linspace(0.1, 0.2, 5))
+    x.plot_topomap(ch_type='mag', scale=1, size=1.5, vmax=np.max,
+                   unit=unit, times=np.linspace(0.1, 0.2, 5))
 
 trial_count = lm['trial-count']
 
 plot_topomap(trial_count.beta, unit='z (beta)')
-
 plot_topomap(trial_count.t_val, unit='t')
-
 plot_topomap(trial_count.mlog10_p_val, unit='-log10 p')
-
 plot_topomap(trial_count.stderr, unit='z (error)')
diff --git a/examples/time_frequency/plot_compute_raw_data_spectrum.py b/examples/time_frequency/plot_compute_raw_data_spectrum.py
index 4eeb6a2..bc9100c 100644
--- a/examples/time_frequency/plot_compute_raw_data_spectrum.py
+++ b/examples/time_frequency/plot_compute_raw_data_spectrum.py
@@ -12,8 +12,6 @@ to the data to reduce ECG and EOG artifacts.
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
 
@@ -21,6 +19,8 @@ import mne
 from mne import io, read_proj, read_selection
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -43,30 +43,30 @@ n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
 plt.ion()
 
 # Let's first check out all channel types
-raw.plot_psds(area_mode='range', tmax=10.0)
+raw.plot_psd(area_mode='range', tmax=10.0)
 
 # Now let's focus on a smaller subset:
 # Pick MEG magnetometers in the Left-temporal region
 selection = read_selection('Left-temporal')
 picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
-                        stim=False, exclude='bads', selection=selection)
+                       stim=False, exclude='bads', selection=selection)
 
 # Let's just look at the first few channels for demonstration purposes
 picks = picks[:4]
 
 plt.figure()
 ax = plt.axes()
-raw.plot_psds(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
-              n_jobs=1, proj=False, ax=ax, color=(0, 0, 1),  picks=picks)
+raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
+             n_jobs=1, proj=False, ax=ax, color=(0, 0, 1),  picks=picks)
 
 # And now do the same with SSP applied
-raw.plot_psds(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
-              n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks)
+raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
+             n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks)
 
 # And now do the same with SSP + notch filtering
 raw.notch_filter(np.arange(60, 241, 60), picks=picks, n_jobs=1)
-raw.plot_psds(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
-              n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks)
+raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
+             n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks)
 
 ax.set_title('Four left-temporal magnetometers')
 plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
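
As a quick check on the notch frequencies used above, the arange call picks
out the power-line fundamental and its harmonics:

    import numpy as np
    print(np.arange(60, 241, 60))  # [ 60 120 180 240] Hz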
diff --git a/examples/time_frequency/plot_compute_source_psd_epochs.py b/examples/time_frequency/plot_compute_source_psd_epochs.py
index f3d2952..3196a3a 100644
--- a/examples/time_frequency/plot_compute_source_psd_epochs.py
+++ b/examples/time_frequency/plot_compute_source_psd_epochs.py
@@ -8,20 +8,19 @@ to a brain label. The PSD is computed using a multi-taper method with
 Discrete Prolate Spheroidal Sequence (DPSS) windows.
 
 """
-
 # Author: Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
+
 import mne
 from mne.datasets import sample
 from mne.io import Raw
 from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs
 
+print(__doc__)
 
 data_path = sample.data_path()
 fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
@@ -47,7 +46,7 @@ raw.info['bads'] += ['EEG 053']  # bads + 1 more
 
 # pick MEG channels
 picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
-                   include=include, exclude='bads')
+                       include=include, exclude='bads')
 # Read epochs
 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
diff --git a/examples/time_frequency/plot_epochs_spectra.py b/examples/time_frequency/plot_epochs_spectra.py
new file mode 100644
index 0000000..7c35d9f
--- /dev/null
+++ b/examples/time_frequency/plot_epochs_spectra.py
@@ -0,0 +1,45 @@
+"""
+=============================================
+Compute the power spectral density of epochs
+=============================================
+
+This script shows how to compute the power spectral density (PSD)
+of epoched measurements. It also shows how to plot its spatial
+distribution.
+"""
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne import io
+from mne.datasets import sample
+
+print(__doc__)
+
+###############################################################################
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+tmin, tmax, event_id = -1., 1., 1
+raw.info['bads'] += ['MEG 2443']  # bads
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
+                    proj=True, baseline=(None, 0), preload=True,
+                    reject=dict(grad=4000e-13, eog=150e-6))
+
+# Let's first check out all channel types by averaging across epochs.
+epochs.plot_psd(fmin=2, fmax=200)
+
+# picks MEG gradiometers
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=False,
+                       stim=False, exclude='bads')
+
+# Now let's take a look at the spatial distributions of the psd.
+epochs.plot_psd_topomap(ch_type='grad', normalize=True)
diff --git a/examples/time_frequency/plot_single_trial_spectra.py b/examples/time_frequency/plot_single_trial_spectra.py
deleted file mode 100644
index e20cded..0000000
--- a/examples/time_frequency/plot_single_trial_spectra.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-======================================
-Investigate Single Trial Power Spectra
-======================================
-
-In this example we will look at single trial spectra and then
-compute average spectra to identify channels and
-frequencies of interest for subsequent TFR analyses.
-"""
-
-# Authors: Denis Engemann <denis.engemann at gmail.com>
-#
-# License: BSD (3-clause)
-
-print(__doc__)
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-import mne
-from mne import io
-from mne.datasets import sample
-from mne.time_frequency import compute_epochs_psd
-###############################################################################
-# Set parameters
-data_path = sample.data_path()
-raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
-event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
-
-# Setup for reading the raw data
-raw = io.Raw(raw_fname)
-events = mne.read_events(event_fname)
-
-tmin, tmax, event_id = -1., 1., 1
-include = []
-raw.info['bads'] += ['MEG 2443']  # bads
-
-# picks MEG gradiometers
-picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                       stim=False, include=include, exclude='bads')
-
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, proj=True,
-                    baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
-
-
-n_fft = 256  # the FFT size. Ideally a power of 2
-psds, freqs = compute_epochs_psd(epochs, fmin=2, fmax=200, n_fft=n_fft,
-                                 n_jobs=2)
-
-# average psds and save psds from first trial separately
-average_psds = psds.mean(0)
-average_psds = 10 * np.log10(average_psds)  # transform into dB
-some_psds = 10 * np.log10(psds[12])
-
-
-fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(10, 5))
-
-fig.suptitle('Single trial power', fontsize=12)
-
-freq_mask = freqs < 150
-freqs = freqs[freq_mask]
-
-ax1.set_title('single trial', fontsize=10)
-ax1.imshow(some_psds[:, freq_mask].T, aspect='auto', origin='lower')
-ax1.set_yticks(np.arange(0, len(freqs), 10))
-ax1.set_yticklabels(freqs[::10].round(1))
-ax1.set_ylabel('Frequency (Hz)')
-
-ax2.set_title('averaged over trials', fontsize=10)
-ax2.imshow(average_psds[:, freq_mask].T, aspect='auto', origin='lower')
-ax2.set_xticks(np.arange(0, len(picks), 30))
-ax2.set_xticklabels(picks[::30])
-ax2.set_xlabel('MEG channel index (Gradiometers)')
-
-mne.viz.tight_layout()
-plt.show()
-
-# In the second image we clearly observe certain channel groups exposing
-# stronger power than others. Second, in comparison to the single
-# trial image we can see the frequency extent slightly growing for these
-# channels which might indicate oscillatory responses.
-# The ``plot_time_frequency.py`` example investigates one of the channels
-# around index 140.
-# Finally, also note the power line artifacts across all channels.
-
-# Now let's take a look at the spatial distributions of the lower frequencies
-# Note. We're 'abusing' the Evoked.plot_topomap method here to display
-# our average powermap
-
-evoked = epochs.average()  # create evoked
-evoked.data = average_psds[:, freq_mask]  # insert our psd data
-evoked.times = freqs  # replace times with frequencies.
-evoked.plot_topomap(ch_type='grad', times=range(5, 12, 2),
-                    scale=1, scale_time=1, time_format='%0.1f Hz',
-                    cmap='Reds', vmin=np.min, vmax=np.max,
-                    unit='dB', format='-%0.1f')
diff --git a/examples/time_frequency/plot_source_label_time_frequency.py b/examples/time_frequency/plot_source_label_time_frequency.py
index 6a39259..5480522 100644
--- a/examples/time_frequency/plot_source_label_time_frequency.py
+++ b/examples/time_frequency/plot_source_label_time_frequency.py
@@ -15,15 +15,16 @@ latter also includes evoked (stimulus-locked) activity.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, source_induced_power
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -44,7 +45,7 @@ raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # Picks MEG channels
 picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+                       stim=False, include=include, exclude='bads')
 reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
 
 # Load epochs
@@ -61,16 +62,15 @@ n_cycles = frequencies / 3.  # different number of cycle per frequency
 # subtract the evoked response in order to exclude evoked activity
 epochs_induced = epochs.copy().subtract_evoked()
 
-import matplotlib.pyplot as plt
 plt.close('all')
 
 for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
                                               ['evoked + induced',
                                                'induced only'])):
     # compute the source space power and phase lock
-    power, phase_lock = source_induced_power(this_epochs, inverse_operator,
-        frequencies, label, baseline=(-0.1, 0), baseline_mode='percent',
-        n_cycles=n_cycles, n_jobs=1)
+    power, phase_lock = source_induced_power(
+        this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
+        baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
 
     power = np.mean(power, axis=0)  # average over sources
     phase_lock = np.mean(phase_lock, axis=0)  # average over sources
@@ -82,7 +82,7 @@ for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
     plt.subplot(2, 2, 2 * ii + 1)
     plt.imshow(20 * power,
                extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-               aspect='auto', origin='lower', vmin=0., vmax=30.)
+               aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
     plt.xlabel('Time (s)')
     plt.ylabel('Frequency (Hz)')
     plt.title('Power (%s)' % title)
@@ -91,7 +91,8 @@ for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
     plt.subplot(2, 2, 2 * ii + 2)
     plt.imshow(phase_lock,
                extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-               aspect='auto', origin='lower', vmin=0, vmax=0.7)
+               aspect='auto', origin='lower', vmin=0, vmax=0.7,
+               cmap='RdBu_r')
     plt.xlabel('Time (s)')
     plt.ylabel('Frequency (Hz)')
     plt.title('Phase-lock (%s)' % title)
diff --git a/examples/time_frequency/plot_source_power_spectrum.py b/examples/time_frequency/plot_source_power_spectrum.py
index a998744..3eeadb5 100644
--- a/examples/time_frequency/plot_source_power_spectrum.py
+++ b/examples/time_frequency/plot_source_power_spectrum.py
@@ -10,13 +10,15 @@ Returns an STC file containing the PSD (in dB) of each of the sources.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, compute_source_psd
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -32,7 +34,7 @@ raw.info['bads'] = ['MEG 2443', 'EEG 053']
 
 # picks MEG gradiometers
 picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
-                        stim=False, exclude='bads')
+                       stim=False, exclude='bads')
 
 tmin, tmax = 0, 120  # use the first 120s of data
 fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
@@ -47,7 +49,6 @@ stc.save('psd_dSPM')
 
 ###############################################################################
 # View PSD of sources in label
-import matplotlib.pyplot as plt
 plt.plot(1e3 * stc.times, stc.data.T)
 plt.xlabel('Frequency (Hz)')
 plt.ylabel('PSD (dB)')
diff --git a/examples/time_frequency/plot_source_space_time_frequency.py b/examples/time_frequency/plot_source_space_time_frequency.py
index 86ed0da..0c7175a 100644
--- a/examples/time_frequency/plot_source_space_time_frequency.py
+++ b/examples/time_frequency/plot_source_space_time_frequency.py
@@ -12,13 +12,15 @@ is linear based on dSPM inverse operator.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, source_band_induced_power
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -36,7 +38,7 @@ raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
 # picks MEG gradiometers
 picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
-                        stim=False, include=include, exclude='bads')
+                       stim=False, include=include, exclude='bads')
 
 # Load condition 1
 event_id = 1
@@ -57,7 +59,6 @@ for b, stc in stcs.iteritems():
 
 ###############################################################################
 # plot mean power
-import matplotlib.pyplot as plt
 plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
 plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
 plt.xlabel('Time (ms)')
diff --git a/examples/time_frequency/plot_stockwell.py b/examples/time_frequency/plot_stockwell.py
new file mode 100644
index 0000000..8916a08
--- /dev/null
+++ b/examples/time_frequency/plot_stockwell.py
@@ -0,0 +1,50 @@
+"""
+=======================================================
+Time frequency with Stockwell transform in sensor space
+=======================================================
+
+This script shows how to compute induced power and intertrial coherence
+using the Stockwell transform, a.k.a. S-Transform.
+
+"""
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne import io
+from mne.time_frequency import tfr_stockwell
+from mne.datasets import somato
+
+print(__doc__)
+
+###############################################################################
+# Set parameters
+data_path = somato.data_path()
+raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
+event_id, tmin, tmax = 1, -1., 3.
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname)
+baseline = (None, 0)
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# picks MEG gradiometers
+picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
+                    preload=True)
+
+###############################################################################
+# Calculate power and intertrial coherence
+
+epochs = epochs.pick_channels([epochs.ch_names[82]])  # reduce computation
+
+power, itc = tfr_stockwell(epochs, fmin=6., fmax=30., decim=4, n_jobs=1,
+                           width=.3, return_itc=True)
+
+power.plot([0], baseline=(-0.5, 0), mode=None, title='S-transform (power)')
+
+itc.plot([0], baseline=None, mode=None, title='S-transform (ITC)')
diff --git a/examples/time_frequency/plot_temporal_whitening.py b/examples/time_frequency/plot_temporal_whitening.py
index ca92eec..d6c7cab 100644
--- a/examples/time_frequency/plot_temporal_whitening.py
+++ b/examples/time_frequency/plot_temporal_whitening.py
@@ -11,15 +11,16 @@ to temporally whiten the signals.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 from scipy import signal
 import matplotlib.pyplot as plt
 
 import mne
-from mne.time_frequency import ar_raw
+from mne.time_frequency import fit_iir_model_raw
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
@@ -37,14 +38,11 @@ order = 5  # define model order
 picks = picks[:5]
 
 # Estimate AR models on raw data
-coefs = ar_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
-mean_coefs = np.mean(coefs, axis=0)  # mean model across channels
-
-filt = np.r_[1, -mean_coefs]  # filter coefficient
+b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
 d, times = raw[0, 1e4:2e4]  # look at one channel from now on
 d = d.ravel()  # make flat vector
-innovation = signal.convolve(d, filt, 'valid')
-d_ = signal.lfilter([1], filt, innovation)  # regenerate the signal
+innovation = signal.convolve(d, a, 'valid')
+d_ = signal.lfilter(b, a, innovation)  # regenerate the signal
 d_ = np.r_[d_[0] * np.ones(order), d_]  # dummy samples to keep signal length
 
 ###############################################################################
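
fit_iir_model_raw returns the (b, a) coefficients of an IIR model of the
data, so convolving a signal with the denominator a whitens it and
lfilter(b, a, ...) re-colors the innovation, as done above. A self-contained
round trip with a hypothetical AR(1) model (pure SciPy, assumptions only):

    import numpy as np
    from scipy import signal

    rng = np.random.RandomState(0)
    a = np.r_[1., -0.9]  # hypothetical AR(1) denominator
    b = np.r_[1.]        # trivial numerator
    white = rng.randn(1000)
    colored = signal.lfilter(b, a, white)             # color the noise
    recovered = signal.convolve(colored, a, 'valid')  # whiten again
    assert np.allclose(recovered, white[1:])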
diff --git a/examples/time_frequency/plot_time_frequency_multitaper_sensors.py b/examples/time_frequency/plot_time_frequency_multitaper_sensors.py
new file mode 100644
index 0000000..6fa4774
--- /dev/null
+++ b/examples/time_frequency/plot_time_frequency_multitaper_sensors.py
@@ -0,0 +1,55 @@
+"""
+===============================================
+Time-frequency analysis using multitaper method
+===============================================
+
+This example computes induced power and intertrial coherence (ITC)
+using a multitaper method on somatosensory MEG data.
+The power plot is rendered so that the baseline mean is zero.
+"""
+# Authors: Hari Bharadwaj <hari at nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+import mne
+from mne import io
+from mne.time_frequency import tfr_multitaper
+from mne.datasets import somato
+
+print(__doc__)
+
+###############################################################################
+# Load real somatosensory sample data.
+data_path = somato.data_path()
+raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
+event_id, tmin, tmax = 1, -1., 3.
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname)
+baseline = (None, 0)
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# Pick a good channel for somatosensory responses.
+picks = [raw.info['ch_names'].index('MEG 1142'), ]
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=baseline, reject=dict(grad=4000e-13))
+
+###############################################################################
+# Calculate power
+
+freqs = np.arange(5., 50., 2.)  # define frequencies of interest
+n_cycles = freqs / 2.  # 0.5 second time windows for all frequencies
+
+# Choose time x (full) bandwidth product
+time_bandwidth = 4.0  # With 0.5 s time windows, this gives 8 Hz smoothing
+
+power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
+                            use_fft=True, time_bandwidth=time_bandwidth,
+                            return_itc=True, n_jobs=1)
+
+# Plot results (with baseline correction only for power)
+power.plot([0], baseline=(-0.5, 0), mode='mean', title='MEG 1142 - Power')
+itc.plot([0], title='MEG 1142 - Intertrial Coherence')
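
The taper bookkeeping in the comments can be spelled out with the usual
DPSS conventions (frequency smoothing of roughly time_bandwidth divided by
the window length, and time_bandwidth - 1 usable tapers); a small
arithmetic sketch under those assumptions:

    freq = 10.0                 # an example frequency of interest (Hz)
    n_cycles = freq / 2.        # as above
    t_window = n_cycles / freq  # 0.5 s window
    time_bandwidth = 4.0
    freq_smoothing = time_bandwidth / t_window  # 8.0 Hz, as commented
    n_tapers = int(time_bandwidth - 1)          # 3 DPSS tapers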
diff --git a/examples/time_frequency/plot_time_frequency_sensors.py b/examples/time_frequency/plot_time_frequency_sensors.py
index 39a6431..3e9cf34 100644
--- a/examples/time_frequency/plot_time_frequency_sensors.py
+++ b/examples/time_frequency/plot_time_frequency_sensors.py
@@ -5,19 +5,21 @@ Time-frequency representations on topographies for MEG sensors
 
 Both average power and intertrial coherence are displayed.
 """
-print(__doc__)
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 import numpy as np
+import matplotlib.pyplot as plt
+
 import mne
 from mne import io
 from mne.time_frequency import tfr_morlet
 from mne.datasets import somato
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = somato.data_path()
@@ -40,7 +42,7 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
 
 freqs = np.arange(6, 30, 3)  # define frequencies of interest
 n_cycles = freqs / 2.  # different number of cycle per frequency
-power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
+power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
                         return_itc=True, decim=3, n_jobs=1)
 
 # Baseline correction can be applied to power or done in plots
@@ -51,14 +53,13 @@ power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=False,
 power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
 power.plot([82], baseline=(-0.5, 0), mode='logratio')
 
-import matplotlib.pyplot as plt
 fig, axis = plt.subplots(1, 2, figsize=(7, 4))
 power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
                    baseline=(-0.5, 0), mode='logratio', axes=axis[0],
-                   title='Alpha', vmin=-0.45, vmax=0.45)
+                   title='Alpha', vmax=0.45)
 power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
                    baseline=(-0.5, 0), mode='logratio', axes=axis[1],
-                   title='Beta', vmin=-0.45, vmax=0.45)
+                   title='Beta', vmax=0.45)
 mne.viz.tight_layout()
 
 # Inspect ITC
diff --git a/examples/time_frequency/plot_time_frequency_simulated.py b/examples/time_frequency/plot_time_frequency_simulated.py
new file mode 100644
index 0000000..f99e84b
--- /dev/null
+++ b/examples/time_frequency/plot_time_frequency_simulated.py
@@ -0,0 +1,112 @@
+"""
+========================================================
+Time-frequency on simulated data (Multitaper vs. Morlet)
+========================================================
+
+This example demonstrates the different time-frequency estimation
+methods on simulated data. It shows the time-frequency resolution
+trade-off and the problem of estimation variance.
+"""
+# Authors: Hari Bharadwaj <hari at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from mne import create_info, EpochsArray
+from mne.time_frequency import tfr_multitaper, tfr_stockwell, tfr_morlet
+
+print(__doc__)
+
+###############################################################################
+# Simulate data
+
+sfreq = 1000.0
+ch_names = ['SIM0001', 'SIM0002']
+ch_types = ['grad', 'grad']
+info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+
+n_times = int(sfreq)  # 1 second long epochs
+n_epochs = 40
+seed = 42
+rng = np.random.RandomState(seed)
+noise = rng.randn(n_epochs, len(ch_names), n_times)
+
+# Add a 50 Hz sinusoidal burst to the noise and ramp it.
+t = np.arange(n_times, dtype=np.float) / sfreq
+signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
+signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
+on_time = np.logical_and(t >= 0.45, t <= 0.55)
+signal[on_time] *= np.hanning(on_time.sum())  # Ramping
+data = noise + signal
+
+reject = dict(grad=4000)
+events = np.empty((n_epochs, 3), dtype=int)
+first_event_sample = 100
+event_id = dict(sin50hz=1)
+for k in range(n_epochs):
+    events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
+
+epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
+                     reject=reject)
+
+
+###############################################################################
+# Consider different parameter possibilities for multitaper convolution
+freqs = np.arange(5., 100., 3.)
+
+# You can trade time resolution or frequency resolution or both
+# in order to get a reduction in variance
+
+# (1) Least smoothing (most variance/background fluctuations).
+n_cycles = freqs / 2.
+time_bandwidth = 2.0  # Least possible frequency-smoothing (1 taper)
+power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
+                       time_bandwidth=time_bandwidth, return_itc=False)
+# Plot results. Baseline correct based on first 100 ms.
+power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
+           title='Sim: Least smoothing, most variance')
+
+
+# (2) Less frequency smoothing, more time smoothing.
+n_cycles = freqs  # Increase time-window length to 1 second.
+time_bandwidth = 4.0  # Same frequency-smoothing as (1), 3 tapers.
+power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
+                       time_bandwidth=time_bandwidth, return_itc=False)
+# Plot results. Baseline correct based on first 100 ms.
+power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
+           title='Sim: Less frequency smoothing, more time smoothing')
+
+
+# (3) Less time smoothing, more frequency smoothing.
+n_cycles = freqs / 2.
+time_bandwidth = 8.0  # Same time-smoothing as (1), 7 tapers.
+power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
+                       time_bandwidth=time_bandwidth, return_itc=False)
+# Plot results. Baseline correct based on first 100 ms.
+power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
+           title='Sim: Less time smoothing, more frequency smoothing')
+
+# #############################################################################
+# Stockwell (S) transform
+
+# S uses a Gaussian window to balance temporal and spectral resolution.
+# Importantly, frequency bands are phase-normalized, hence strictly
+# comparable with regard to timing, and the input signal can be recovered
+# from the transform in a lossless way if we disregard numerical errors.
+
+fmin, fmax = freqs[[0, -1]]
+for width in (0.7, 3.0):
+    power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
+    power.plot([0], baseline=(0., 0.1), mode='mean',
+               title='Sim: Using S transform, width '
+                     '= {:0.1f}'.format(width), show=True)
+
+# #############################################################################
+# Finally, compare to a Morlet wavelet
+
+n_cycles = freqs / 2.
+power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, return_itc=False)
+power.plot([0], baseline=(0., 0.1), mode='mean', vmin=-1., vmax=3.,
+           title='Sim: Using Morlet wavelet')
diff --git a/examples/visualization/README.txt b/examples/visualization/README.txt
new file mode 100644
index 0000000..25af47a
--- /dev/null
+++ b/examples/visualization/README.txt
@@ -0,0 +1,5 @@
+
+Visualization
+-------------
+
+Looking at data and processing output.
diff --git a/examples/visualization/make_report.py b/examples/visualization/make_report.py
new file mode 100644
index 0000000..ce462f2
--- /dev/null
+++ b/examples/visualization/make_report.py
@@ -0,0 +1,37 @@
+"""
+================================
+Make an MNE-Report with a Slider
+================================
+
+In this example, MEG evoked data are plotted in an html slider.
+"""
+
+# Authors: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+from mne.report import Report
+from mne.datasets import sample
+from mne import read_evokeds
+from matplotlib import pyplot as plt
+
+
+report = Report()
+path = sample.data_path()
+fname = path + '/MEG/sample/sample_audvis-ave.fif'
+
+# Load the evoked data
+evoked = read_evokeds(fname, condition='Left Auditory',
+                      baseline=(None, 0), verbose=False)
+evoked.crop(0, .2)
+times = evoked.times[::4]
+# Create a list of figs for the slider
+figs = list()
+for time in times:
+    figs.append(evoked.plot_topomap(time, vmin=-300, vmax=300,
+                                    res=100, show=False))
+    plt.close(figs[-1])
+report.add_slider_to_section(figs, times, 'Evoked Response')
+
+# # to save report
+# report.save('foobar.html', True)
diff --git a/examples/plot_channel_epochs_image.py b/examples/visualization/plot_channel_epochs_image.py
similarity index 91%
rename from examples/plot_channel_epochs_image.py
rename to examples/visualization/plot_channel_epochs_image.py
index 8c61ff5..62f9764 100644
--- a/examples/plot_channel_epochs_image.py
+++ b/examples/visualization/plot_channel_epochs_image.py
@@ -15,10 +15,8 @@ embedding as described in:
 Graph-based variability estimation in single-trial event-related neural
 responses A. Gramfort, R. Keriven, M. Clerc, 2010,
 Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
-http://hal.inria.fr/inria-00497023
+https://hal.inria.fr/inria-00497023
 """
-print(__doc__)
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
@@ -29,6 +27,9 @@ import matplotlib.pyplot as plt
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -56,8 +57,8 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
 
 # and order with spectral reordering
 # If you don't have scikit-learn installed set order_func to None
-from sklearn.cluster.spectral import spectral_embedding
-from sklearn.metrics.pairwise import rbf_kernel
+from sklearn.cluster.spectral import spectral_embedding  # noqa
+from sklearn.metrics.pairwise import rbf_kernel   # noqa
 
 
 def order_func(times, data):
@@ -70,5 +71,5 @@ good_pick = 97  # channel with a clear evoked response
 bad_pick = 98  # channel with no evoked response
 
 plt.close('all')
-mne.viz.plot_image_epochs(epochs, [good_pick, bad_pick], sigma=0.5, vmin=-100,
+mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=0.5, vmin=-100,
                           vmax=250, colorbar=True, order=order_func, show=True)
diff --git a/examples/visualization/plot_clickable_image.py b/examples/visualization/plot_clickable_image.py
new file mode 100644
index 0000000..9ae4bbc
--- /dev/null
+++ b/examples/visualization/plot_clickable_image.py
@@ -0,0 +1,66 @@
+"""
+================================================================
+Demonstration of how to use ClickableImage / generate_2d_layout.
+================================================================
+
+In this example, we open an image file, then use ClickableImage to
+return 2D locations of mouse clicks (or load a file already created).
+Then, we use generate_2d_layout to turn those xy positions into a layout
+for use with plotting topo maps. In this way, you can take arbitrary xy
+positions and turn them into a plottable layout.
+"""
+# Authors: Christopher Holdgraf <choldgraf at berkeley.edu>
+#
+# License: BSD (3-clause)
+from scipy.ndimage import imread
+import numpy as np
+from matplotlib import pyplot as plt
+from os import path as op
+import mne
+from mne.viz import ClickableImage, add_background_image  # noqa
+from mne.channels import generate_2d_layout  # noqa
+
+print(__doc__)
+
+# Set parameters and paths
+plt.rcParams['image.cmap'] = 'gray'
+im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif')
+
+# We've already clicked and exported
+layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
+layout_name = 'custom_layout.lout'
+
+###############################################################################
+# Load data and click
+im = imread(im_path)
+plt.imshow(im)
+"""
+This code opens the image so you can click on it. Commented out
+because we've stored the clicks as a layout file already.
+
+# The click coordinates are stored as a list of tuples
+click = ClickableImage(im)
+click.plot_clicks()
+coords = click.coords
+
+# Generate a layout from our clicks and normalize by the image
+lt = generate_2d_layout(np.vstack(coords), bg_image=im)
+lt.save(layout_path + layout_name)  # To save if we want
+"""
+# We've already got the layout, load it
+lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
+
+# Create some fake data
+nchans = len(lt.pos)
+nepochs = 50
+sr = 1000
+nsec = 5
+events = np.arange(nepochs).reshape([-1, 1])
+events = np.hstack([events, np.zeros([nepochs, 2])])
+data = np.random.randn(nepochs, nchans, sr * nsec)
+info = mne.create_info(nchans, sr, ch_types='eeg')
+epochs = mne.EpochsArray(data, info, events)
+evoked = epochs.average()
+
+# Using the native plot_topo function with the image plotted in the background
+f = evoked.plot_topo(layout=lt, fig_background=im)
diff --git a/examples/plot_evoked_delayed_ssp.py b/examples/visualization/plot_evoked_delayed_ssp.py
similarity index 99%
rename from examples/plot_evoked_delayed_ssp.py
rename to examples/visualization/plot_evoked_delayed_ssp.py
index 7abf6e8..c746ba9 100644
--- a/examples/plot_evoked_delayed_ssp.py
+++ b/examples/visualization/plot_evoked_delayed_ssp.py
@@ -17,12 +17,13 @@ on the evoked data.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
diff --git a/examples/visualization/plot_evoked_erf_erp.py b/examples/visualization/plot_evoked_erf_erp.py
new file mode 100644
index 0000000..ed0e86d
--- /dev/null
+++ b/examples/visualization/plot_evoked_erf_erp.py
@@ -0,0 +1,51 @@
+"""
+=================================
+Plotting ERF/ERP with evoked data
+=================================
+
+Load evoked data and plot.
+
+"""
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import matplotlib.pyplot as plt
+from mne.datasets import sample
+from mne import read_evokeds
+
+print(__doc__)
+
+path = sample.data_path()
+fname = path + '/MEG/sample/sample_audvis-ave.fif'
+
+# load evoked and subtract baseline
+condition = 'Left Auditory'
+evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
+
+# Note: You can paint an area with the left mouse button to show the
+# topographic map of the N100.
+
+evoked.plot()
+
+###############################################################################
+# Or plot manually after extracting peak latency
+
+evoked = evoked.pick_types(meg=False, eeg=True)
+times = 1e3 * evoked.times  # time in milliseconds
+
+ch_max_name, latency = evoked.get_peak(mode='neg')
+
+plt.figure()
+plt.plot(times, 1e6 * evoked.data.T, 'k-')
+plt.xlim([times[0], times[-1]])
+plt.xlabel('time (ms)')
+plt.ylabel('Potential (uV)')
+plt.title('EEG evoked potential')
+
+plt.axvline(latency * 1e3, color='red',
+            label=ch_max_name, linewidth=2,
+            linestyle='--')
+plt.legend(loc='best')
+
+plt.show()
diff --git a/examples/plot_evoked_topomap.py b/examples/visualization/plot_evoked_topomap.py
similarity index 88%
rename from examples/plot_evoked_topomap.py
rename to examples/visualization/plot_evoked_topomap.py
index 7793540..dc9ece6 100644
--- a/examples/plot_evoked_topomap.py
+++ b/examples/visualization/plot_evoked_topomap.py
@@ -12,13 +12,13 @@ Load evoked data and plot topomaps for selected time points.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import matplotlib.pyplot as plt
 from mne.datasets import sample
 from mne import read_evokeds
 
+print(__doc__)
+
 path = sample.data_path()
 fname = path + '/MEG/sample/sample_audvis-ave.fif'
 
@@ -33,10 +33,13 @@ times = np.arange(0.05, 0.15, 0.01)
 # plot magnetometer data as topomaps
 evoked.plot_topomap(times, ch_type='mag')
 
+# average over a 50 ms window to stabilize the topographies
+evoked.plot_topomap(times, ch_type='mag', average=0.05)
+
 # plot gradiometer data (plots the RMS for each pair of gradiometers)
 evoked.plot_topomap(times, ch_type='grad')
 
-# plot magnetometer data as topomap at 1 time point : 100ms
+# plot magnetometer data as topomap at 1 time point : 100 ms
 # and add channel labels and title
 evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
                     size=6, res=128, title='Auditory response')
diff --git a/examples/plot_evoked_topomap_delayed_ssp.py b/examples/visualization/plot_evoked_topomap_delayed_ssp.py
similarity index 97%
rename from examples/plot_evoked_topomap_delayed_ssp.py
rename to examples/visualization/plot_evoked_topomap_delayed_ssp.py
index 82745e3..b134294 100644
--- a/examples/plot_evoked_topomap_delayed_ssp.py
+++ b/examples/visualization/plot_evoked_topomap_delayed_ssp.py
@@ -15,12 +15,13 @@ SSP application.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -58,4 +59,4 @@ evoked = epochs.average()  # average epochs and get an Evoked dataset.
 times = np.arange(0.05, 0.15, 0.01)
 
 evoked.plot_topomap(times, proj='interactive')
-# Hint: the same works for evoked.plot and viz.plot_topo
+# Hint: the same works for evoked.plot and evoked.plot_topo
diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/plot_evoked_whitening.py
new file mode 100644
index 0000000..9231432
--- /dev/null
+++ b/examples/visualization/plot_evoked_whitening.py
@@ -0,0 +1,80 @@
+"""
+=============================================
+Whitening evoked data with a noise covariance
+=============================================
+
+Evoked data are loaded and then whitened using a given noise covariance
+matrix. Whitening is an excellent quality check: if the baseline signals
+match the assumption of Gaussian white noise, we expect whitened values
+around 0 and within 2 standard deviations. Covariance estimation and
+diagnostic plots are based on [1].
+
+References
+----------
+[1] Engemann D. and Gramfort A. (2015). Automated model selection in
+    covariance estimation and spatial whitening of MEG and EEG signals.
+    NeuroImage, vol. 108, pp. 328-342.
+
+"""
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import mne
+
+from mne import io
+from mne.datasets import sample
+from mne.cov import compute_covariance
+
+print(__doc__)
+
+###############################################################################
+# Set parameters
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+
+raw = io.Raw(raw_fname, preload=True)
+raw.filter(1, 40, method='iir', n_jobs=1)
+raw.info['bads'] += ['MEG 2443']  # bads + 1 more
+events = mne.read_events(event_fname)
+
+# let's look at rare events, button presses
+event_id, tmin, tmax = 2, -0.2, 0.5
+picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, exclude='bads')
+reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=None, reject=reject, preload=True)
+
+# Uncomment next line to use fewer samples and study regularization effects
+# epochs = epochs[:20]  # For your data, use as many samples as you can!
+
+###############################################################################
+# Compute covariance using automated regularization
+noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
+                                return_estimators=True, verbose=True, n_jobs=1,
+                                projs=None)
+
+# With "return_estimator=True" all estimated covariances sorted
+# by log-likelihood are returned.
+
+print('Covariance estimates sorted from best to worst')
+for c in noise_covs:
+    print("%s : %s" % (c['method'], c['loglik']))
+
+###############################################################################
+# Show whitening
+
+evoked = epochs.average()
+
+evoked.plot()  # plot evoked response
+
+# plot the whitened evoked data to see if the baseline signals match the
+# assumption of Gaussian white noise, under which we expect values around
+# 0 within 2 standard deviations. For the global field power we expect
+# a value of 1.
+
+evoked.plot_white(noise_covs)
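For intuition, the whitener here is W = C^{-1/2} computed from the noise
covariance C, so that whitened baseline noise has unit variance in every
channel. A minimal numpy sketch of that idea, using a synthetic covariance
rather than MNE's estimators:

    import numpy as np

    rng = np.random.RandomState(0)
    n_channels, n_times = 10, 1000

    # synthetic positive-definite noise covariance C
    A = rng.randn(n_channels, n_channels)
    C = A.dot(A.T) / n_channels + np.eye(n_channels)

    # draw noise with covariance C
    noise = np.linalg.cholesky(C).dot(rng.randn(n_channels, n_times))

    # whitener W = C^{-1/2} via eigendecomposition
    eigvals, eigvecs = np.linalg.eigh(C)
    W = eigvecs.dot(np.diag(1. / np.sqrt(eigvals))).dot(eigvecs.T)

    whitened = W.dot(noise)
    print(np.std(whitened))  # close to 1 when the covariance matches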
diff --git a/examples/plot_meg_eeg_fields_3d.py b/examples/visualization/plot_meg_eeg_fields_3d.py
similarity index 65%
rename from examples/plot_meg_eeg_fields_3d.py
rename to examples/visualization/plot_meg_eeg_fields_3d.py
index e863edd..61556b5 100644
--- a/examples/plot_meg_eeg_fields_3d.py
+++ b/examples/visualization/plot_meg_eeg_fields_3d.py
@@ -14,11 +14,11 @@ This process can be computationally intensive.
 
 # License: BSD (3-clause)
 
-print(__doc__)
-
 from mne.datasets import sample
 from mne import make_field_map, read_evokeds
 
+print(__doc__)
+
 data_path = sample.data_path()
 subjects_dir = data_path + '/subjects'
 evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
@@ -30,8 +30,17 @@ evoked = read_evokeds(evoked_fname, condition=condition, baseline=(-0.2, 0.0))
 
 # Compute the field maps to project MEG and EEG data to MEG helmet
 # and scalp surface
-maps = make_field_map(evoked, trans_fname=trans_fname, subject='sample',
+maps = make_field_map(evoked, trans_fname, subject='sample',
                       subjects_dir=subjects_dir, n_jobs=1)
 
-# explore several points in time
-[evoked.plot_field(maps, time=time) for time in [0.09, .11]]
+# Plot MEG and EEG fields on the helmet and scalp surfaces in the same figure.
+evoked.plot_field(maps, time=0.11)
+
+# Compute the MEG fields on the scalp surface
+evoked.pick_types(meg=True, eeg=False)
+maps_head = make_field_map(evoked, trans_fname, subject='sample',
+                           subjects_dir=subjects_dir, n_jobs=1,
+                           meg_surf='head')
+
+# Plot MEG fields both on the scalp surface and in the helmet in the same
+# figure.
+evoked.plot_field([maps_head[0], maps[1]], time=0.11)
diff --git a/examples/plot_ssp_projs_sensitivity_map.py b/examples/visualization/plot_ssp_projs_sensitivity_map.py
similarity index 89%
rename from examples/plot_ssp_projs_sensitivity_map.py
rename to examples/visualization/plot_ssp_projs_sensitivity_map.py
index 878a15e..8cea492 100644
--- a/examples/plot_ssp_projs_sensitivity_map.py
+++ b/examples/visualization/plot_ssp_projs_sensitivity_map.py
@@ -10,10 +10,13 @@ similar to the first SSP vector correcting for ECG.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
+import matplotlib.pyplot as plt
 
 from mne import read_forward_solution, read_proj, sensitivity_map
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 subjects_dir = data_path + '/subjects'
@@ -30,10 +33,9 @@ ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
 ###############################################################################
 # Show sensitivity map
 
-import matplotlib.pyplot as plt
 plt.hist(ssp_ecg_map.data.ravel())
 plt.show()
 
-args = dict(fmin=0.2, fmid=0.6, fmax=1., smoothing_steps=7, hemi='rh',
-            subjects_dir=subjects_dir)
+args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7,
+            hemi='rh', subjects_dir=subjects_dir)
 ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
diff --git a/examples/plot_ssp_projs_topomaps.py b/examples/visualization/plot_ssp_projs_topomaps.py
similarity index 79%
rename from examples/plot_ssp_projs_topomaps.py
rename to examples/visualization/plot_ssp_projs_topomaps.py
index c86a666..0b7c6b2 100644
--- a/examples/plot_ssp_projs_topomaps.py
+++ b/examples/visualization/plot_ssp_projs_topomaps.py
@@ -8,14 +8,15 @@ The projections used are the ones correcting for ECG artifacts.
 """
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #         Denis A. Engemann <denis.engemann at gmail.com>
+#         Teon Brooks <teon.brooks at gmail.com>
 
 # License: BSD (3-clause)
 
+from mne import read_proj, read_evokeds
+from mne.datasets import sample
+
 print(__doc__)
 
-from mne import read_proj, find_layout, read_evokeds
-from mne.datasets import sample
-from mne import viz
 data_path = sample.data_path()
 
 ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
@@ -23,7 +24,6 @@ ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
 
 evoked = read_evokeds(ave_fname, condition='Left Auditory')
 projs = read_proj(ecg_fname)
+evoked.add_proj(projs)
 
-layouts = [find_layout(evoked.info, k) for k in 'meg', 'eeg']
-
-viz.plot_projs_topomap(projs, layout=layouts)
+evoked.plot_projs_topomap()
diff --git a/examples/plot_topo_channel_epochs_image.py b/examples/visualization/plot_topo_channel_epochs_image.py
similarity index 99%
rename from examples/plot_topo_channel_epochs_image.py
rename to examples/visualization/plot_topo_channel_epochs_image.py
index a08c699..d1d2d97 100644
--- a/examples/plot_topo_channel_epochs_image.py
+++ b/examples/visualization/plot_topo_channel_epochs_image.py
@@ -9,8 +9,6 @@ potential / field (ERP/ERF) images.
 One sensor topography plot is produced with the evoked field images from
 the selected channels.
 """
-print(__doc__)
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Denis Engemann <denis.engemann at gmail.com>
 #
@@ -21,6 +19,9 @@ import matplotlib.pyplot as plt
 import mne
 from mne import io
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
diff --git a/examples/plot_topo_compare_conditions.py b/examples/visualization/plot_topo_compare_conditions.py
similarity index 80%
rename from examples/plot_topo_compare_conditions.py
rename to examples/visualization/plot_topo_compare_conditions.py
index b929d68..b163dba 100644
--- a/examples/plot_topo_compare_conditions.py
+++ b/examples/visualization/plot_topo_compare_conditions.py
@@ -16,14 +16,16 @@ evoked responses.
 
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import matplotlib.pyplot as plt
 import mne
 
 from mne.io import Raw
-from mne.viz import plot_topo
+from mne.viz import plot_evoked_topo
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 ###############################################################################
@@ -50,22 +52,25 @@ picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
                        include=include, exclude='bads')
 
 # Create epochs including different events
-epochs = mne.Epochs(raw, events, dict(audio_l=1, visual_r=3), tmin, tmax,
+event_id = {'audio/left': 1, 'audio/right': 2,
+            'visual/left': 3, 'visual/right': 4}
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                     picks=picks, baseline=(None, 0), reject=reject)
 
 # Generate list of evoked objects from conditions names
-evokeds = [epochs[name].average() for name in 'audio_l', 'visual_r']
+evokeds = [epochs[name].average() for name in ('left', 'right')]
 
 ###############################################################################
 # Show topography for two different conditions
 
 colors = 'yellow', 'green'
-title = 'MNE sample data - left auditory and visual'
+title = 'MNE sample data - left vs right (A/V combined)'
 
-plot_topo(evokeds, color=colors, title=title)
+plot_evoked_topo(evokeds, color=colors, title=title)
 
 conditions = [e.comment for e in evokeds]
 for cond, col, pos in zip(conditions, colors, (0.025, 0.07)):
-    plt.figtext(0.775, pos, cond, color=col, fontsize=12)
+    plt.figtext(0.99, pos, cond, color=col, fontsize=12,
+                horizontalalignment='right')
 
 plt.show()
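The '/'-separated event names introduced above enable MNE's tag-based epoch
selection: indexing with a partial tag pools every condition whose name
contains it, which is how epochs['left'] gathers both audio/left and
visual/left. A small self-contained sketch of the mechanism (fake data and
hypothetical channel names, not the sample dataset):

    import numpy as np
    import mne

    event_id = {'audio/left': 1, 'audio/right': 2,
                'visual/left': 3, 'visual/right': 4}
    events = np.c_[np.arange(8) * 200, np.zeros(8, int),
                   np.tile([1, 2, 3, 4], 2)]
    data = np.random.randn(8, 2, 100)
    info = mne.create_info(['EEG 001', 'EEG 002'], 100., ch_types='eeg')
    epochs = mne.EpochsArray(data, info, events, event_id=event_id)

    print(len(epochs['left']))   # pools audio/left + visual/left -> 4
    print(len(epochs['audio']))  # pools audio/left + audio/right -> 4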
diff --git a/examples/plot_topo_customized.py b/examples/visualization/plot_topo_customized.py
similarity index 100%
rename from examples/plot_topo_customized.py
rename to examples/visualization/plot_topo_customized.py
index d8c4003..44bda9a 100644
--- a/examples/plot_topo_customized.py
+++ b/examples/visualization/plot_topo_customized.py
@@ -14,18 +14,18 @@ layout.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import numpy as np
-import mne
+import matplotlib.pyplot as plt
 
+import mne
 from mne.viz import iter_topography
 from mne import io
 from mne.time_frequency import compute_raw_psd
+from mne.datasets import sample
 
-import matplotlib.pyplot as plt
+print(__doc__)
 
-from mne.datasets import sample
 data_path = sample.data_path()
 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
 
diff --git a/examples/plot_topography.py b/examples/visualization/plot_topography.py
similarity index 100%
rename from examples/plot_topography.py
rename to examples/visualization/plot_topography.py
index 4bb04cd..1cb104a 100644
--- a/examples/plot_topography.py
+++ b/examples/visualization/plot_topography.py
@@ -4,18 +4,18 @@ Plot topographies for MEG sensors
 =================================
 
 """
-
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import matplotlib.pyplot as plt
 
 from mne import read_evokeds
 from mne.viz import plot_topo
 from mne.datasets import sample
+
+print(__doc__)
+
 data_path = sample.data_path()
 
 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
diff --git a/logo/generate_mne_logos.py b/logo/generate_mne_logos.py
new file mode 100644
index 0000000..39f5ef4
--- /dev/null
+++ b/logo/generate_mne_logos.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+"""
+===============================================================================
+Script 'mne logo'
+===============================================================================
+
+This script makes the logo for MNE.
+"""
+# @author: drmccloy
+# Created on Mon Jul 20 11:28:16 2015
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from matplotlib.mlab import bivariate_normal
+from matplotlib.path import Path
+from matplotlib.text import TextPath
+from matplotlib.patches import PathPatch
+from matplotlib.colors import LinearSegmentedColormap
+from matplotlib.transforms import Bbox
+
+# manually set values
+dpi = 72.
+center_fudge = np.array([2, 0])  # compensate for font bounding box padding
+tagline_scale_fudge = 0.98  # to get justification right
+tagline_offset_fudge = np.array([0.4, 0])
+
+static_dir = op.join('..', 'doc', '_static')
+
+# font, etc
+rcp = {'font.sans-serif': ['Primetime'], 'font.style': 'normal',
+       'font.weight': 'black', 'font.variant': 'normal', 'figure.dpi': dpi,
+       'savefig.dpi': dpi, 'contour.negative_linestyle': 'solid'}
+plt.rcdefaults()
+rcParams.update(rcp)
+
+# initialize figure (no axes, margins, etc)
+fig = plt.figure(1, figsize=(5, 3), frameon=False, dpi=dpi)
+ax = plt.Axes(fig, [0., 0., 1., 1.])
+ax.set_axis_off()
+fig.add_axes(ax)
+
+# fake field data
+delta = 0.1
+x = np.arange(-8.0, 8.0, delta)
+y = np.arange(-3.0, 3.0, delta)
+X, Y = np.meshgrid(x, y)
+Z1 = bivariate_normal(X, Y, 8.0, 7.0, -5.0, 0.9, 1.0)
+Z2 = bivariate_normal(X, Y, 15.0, 2.5, 2.6, -2.5, 2.5)
+Z = Z2 - 0.7 * Z1
+
+# color map: field gradient (yellow-red-transparent-blue-cyan)
+yrtbc = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)),
+         'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)),
+         'green': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)),
+         'alpha': ((0.0, 1.0, 1.0), (0.4, 0.8, 0.8), (0.5, 0.2, 0.2),
+                   (0.6, 0.8, 0.8), (1.0, 1.0, 1.0))}
+# color map: field lines (red | blue)
+redbl = {'red': ((0., 1., 1.), (0.5, 1., 0.), (1., 0., 0.)),
+         'blue': ((0., 0., 0.), (0.5, 0., 1.), (1., 1., 1.)),
+         'green': ((0., 0., 0.), (1., 0., 0.)),
+         'alpha': ((0., 0.4, 0.4), (1., 0.4, 0.4))}
+mne_field_grad_cols = LinearSegmentedColormap('mne_grad', yrtbc)
+mne_field_line_cols = LinearSegmentedColormap('mne_line', redbl)
+
+# plot gradient and contour lines
+im = plt.imshow(Z, cmap=mne_field_grad_cols, aspect='equal')
+cs = plt.contour(Z, 9, cmap=mne_field_line_cols, linewidths=1)
+plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
+
+# create MNE clipping mask
+mne_path = TextPath((0, 0), 'MNE')
+dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
+vert = mne_path.vertices - dims / 2.
+mult = (plot_dims / dims).min()
+mult = [mult, -mult]  # y axis is inverted (origin at top left)
+offset = plot_dims / 2. - center_fudge
+mne_clip = Path(offset + vert * mult, mne_path.codes)
+# apply clipping mask to field gradient and lines
+im.set_clip_path(mne_clip, transform=im.get_transform())
+for coll in cs.collections:
+    coll.set_clip_path(mne_clip, transform=im.get_transform())
+# get final position of clipping mask
+mne_corners = mne_clip.get_extents().corners()
+
+# add tagline
+rcParams.update({'font.sans-serif': ['Cooper Hewitt'], 'font.weight': 'light'})
+tag_path = TextPath((0, 0), 'MEG + EEG  ANALYSIS & VISUALIZATION')
+dims = tag_path.vertices.max(0) - tag_path.vertices.min(0)
+vert = tag_path.vertices - dims / 2.
+mult = tagline_scale_fudge * (plot_dims / dims).min()
+mult = [mult, -mult]  # y axis is inverted
+offset = mne_corners[-1] - np.array([mne_clip.get_extents().size[0] / 2.,
+                                     -dims[1]]) - tagline_offset_fudge
+tag_clip = Path(offset + vert * mult, tag_path.codes)
+tag_patch = PathPatch(tag_clip, facecolor='k', edgecolor='none', zorder=10)
+ax.add_patch(tag_patch)
+yl = ax.get_ylim()
+yy = np.max([tag_clip.vertices.max(0)[-1],
+             tag_clip.vertices.min(0)[-1]])
+ax.set_ylim(np.ceil(yy), yl[-1])
+
+# only save actual image extent plus a bit of padding
+extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
+extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
+plt.draw()
+plt.savefig(op.join(static_dir, 'mne_logo.png'),
+            bbox_inches=extent.expanded(1.2, 1.))
+plt.close()
+
+# 92x22 image
+w_px = 92
+h_px = 22
+center_fudge = np.array([12, 0.5])
+scale_fudge = 2.1
+rcParams.update({'font.sans-serif': ['Primetime'], 'font.weight': 'black'})
+x = np.linspace(-8., 8., w_px / 2.)
+y = np.linspace(-3., 3., h_px / 2.)
+X, Y = np.meshgrid(x, y)
+# initialize figure (no axes, margins, etc)
+fig = plt.figure(1, figsize=(w_px / dpi, h_px / dpi), frameon=False, dpi=dpi)
+ax = plt.Axes(fig, [0., 0., 1., 1.])
+ax.set_axis_off()
+fig.add_axes(ax)
+# plot rainbow
+im = plt.imshow(X, cmap=mne_field_grad_cols, aspect='equal')
+plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
+# MNE text in white
+mne_path = TextPath((0, 0), 'MNE')
+dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
+vert = mne_path.vertices - dims / 2.
+mult = scale_fudge * (plot_dims / dims).min()
+mult = [mult, -mult]  # y axis is inverted (origin at top left)
+offset = np.array([scale_fudge, 1.]) * \
+    np.array([-dims[0], plot_dims[-1]]) / 2. - center_fudge
+mne_clip = Path(offset + vert * mult, mne_path.codes)
+mne_patch = PathPatch(mne_clip, facecolor='w', edgecolor='none', zorder=10)
+ax.add_patch(mne_patch)
+# adjust xlim and ylim
+mne_corners = mne_clip.get_extents().corners()
+xmin, ymin = np.min(mne_corners, axis=0)
+xmax, ymax = np.max(mne_corners, axis=0)
+xl = ax.get_xlim()
+yl = ax.get_ylim()
+xpad = np.abs(np.diff([xmin, xl[1]])) / 20.
+ypad = np.abs(np.diff([ymax, ymin])) / 20.
+ax.set_xlim(xmin - xpad, xl[1] + xpad)
+ax.set_ylim(ymax + ypad, ymin - ypad)
+extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
+extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
+plt.draw()
+plt.savefig(op.join(static_dir, 'mne_logo_small.png'), transparent=True,
+            bbox_inches=extent)
+plt.close()
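The central trick in this script is using a TextPath as a clipping mask, so
the simulated field image shows through only inside the letter outlines. A
stripped-down sketch of that technique with an arbitrary image (the fonts,
fudge factors, and colormaps above are incidental):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.path import Path
    from matplotlib.text import TextPath

    fig, ax = plt.subplots(figsize=(5, 2))
    ax.set_axis_off()
    im = ax.imshow(np.random.randn(60, 150), cmap='RdBu_r', aspect='auto')

    # center a text path on the image; flip y because imshow puts the
    # origin at the top left
    text = TextPath((0, 0), 'MNE', size=50)
    verts = text.vertices - text.vertices.mean(axis=0)
    clip = Path(verts * [1, -1] + [75, 30], text.codes)
    im.set_clip_path(clip, transform=im.get_transform())
    plt.show()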
diff --git a/make/install_python.ps1 b/make/install_python.ps1
new file mode 100644
index 0000000..23b996f
--- /dev/null
+++ b/make/install_python.ps1
@@ -0,0 +1,93 @@
+# Sample script to install Python and pip under Windows
+# Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner
+# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
+
+$MINICONDA_URL = "http://repo.continuum.io/miniconda/"
+$BASE_URL = "https://www.python.org/ftp/python/"
+
+
+function DownloadMiniconda ($python_version, $platform_suffix) {
+    $webclient = New-Object System.Net.WebClient
+    if ($python_version -eq "3.4") {
+        $filename = "Miniconda3-latest-Windows-" + $platform_suffix + ".exe"
+    } else {
+        $filename = "Miniconda-latest-Windows-" + $platform_suffix + ".exe"
+    }
+    $url = $MINICONDA_URL + $filename
+
+    $basedir = $pwd.Path + "\"
+    $filepath = $basedir + $filename
+    if (Test-Path $filename) {
+        Write-Host "Reusing" $filepath
+        return $filepath
+    }
+
+    # Download and retry up to 3 times in case of transient network errors.
+    Write-Host "Downloading" $filename "from" $url
+    $retry_attempts = 2
+    for($i=0; $i -lt $retry_attempts; $i++){
+        try {
+            $webclient.DownloadFile($url, $filepath)
+            break
+        }
+        Catch [Exception]{
+            Start-Sleep 1
+        }
+   }
+   if (Test-Path $filepath) {
+       Write-Host "File saved at" $filepath
+   } else {
+       # Retry once to get the error message if any at the last try
+       $webclient.DownloadFile($url, $filepath)
+   }
+   return $filepath
+}
+
+
+function InstallMiniconda ($python_version, $architecture, $python_home) {
+    Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
+    if (Test-Path $python_home) {
+        Write-Host $python_home "already exists, skipping."
+        return $false
+    }
+    if ($architecture -eq "32") {
+        $platform_suffix = "x86"
+    } else {
+        $platform_suffix = "x86_64"
+    }
+    $filepath = DownloadMiniconda $python_version $platform_suffix
+    Write-Host "Installing" $filepath "to" $python_home
+    $install_log = $python_home + ".log"
+    $args = "/S /D=$python_home"
+    Write-Host $filepath $args
+    Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru
+    if (Test-Path $python_home) {
+        Write-Host "Python $python_version ($architecture) installation complete"
+    } else {
+        Write-Host "Failed to install Python in $python_home"
+        Get-Content -Path $install_log
+        Exit 1
+    }
+}
+
+
+function InstallMinicondaPip ($python_home) {
+    $pip_path = $python_home + "\Scripts\pip.exe"
+    $conda_path = $python_home + "\Scripts\conda.exe"
+    if (-not(Test-Path $pip_path)) {
+        Write-Host "Installing pip..."
+        $args = "install --yes pip"
+        Write-Host $conda_path $args
+        Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru
+    } else {
+        Write-Host "pip already installed."
+    }
+}
+
+
+function main () {
+    InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON
+    InstallMinicondaPip $env:PYTHON
+}
+
+main
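For context, main() reads the PYTHON_VERSION, PYTHON_ARCH and PYTHON
environment variables, so whatever invokes this script (presumably the
appveyor.yml added in this commit) must set them beforehand.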
diff --git a/mne/__init__.py b/mne/__init__.py
index 2ae0043..dfecae9 100644
--- a/mne/__init__.py
+++ b/mne/__init__.py
@@ -1,21 +1,42 @@
 """MNE for MEG and EEG data analysis
 """
 
-__version__ = '0.8.6'
+# PEP0440 compatible formatted version, see:
+# https://www.python.org/dev/peps/pep-0440/
+#
+# Generic release markers:
+#   X.Y
+#   X.Y.Z   # For bugfix releases
+#
+# Admissible pre-release markers:
+#   X.YaN   # Alpha release
+#   X.YbN   # Beta release
+#   X.YrcN  # Release Candidate
+#   X.Y     # Final release
+#
+# Dev branch marker is: 'X.Y.devN' where N is an integer.
+#
+
+__version__ = '0.10.dev0'
 
 # have to import verbose first since it's needed by many things
 from .utils import (set_log_level, set_log_file, verbose, set_config,
                     get_config, get_config_path, set_cache_dir,
                     set_memmap_min_size)
-from .io.pick import (pick_types, pick_channels, pick_types_evoked,
+from .io.pick import (pick_types, pick_channels,
                       pick_channels_regexp, pick_channels_forward,
                       pick_types_forward, pick_channels_cov,
                       pick_channels_evoked, pick_info)
-from .io.base import concatenate_raws, get_chpi_positions
+from .io.base import concatenate_raws
+from .chpi import get_chpi_positions
 from .io.meas_info import create_info
+from .io.kit import read_epochs_kit
+from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
+                  read_bem_surfaces, write_bem_surface, write_bem_surfaces,
+                  read_bem_solution, write_bem_solution)
 from .cov import (read_cov, write_cov, Covariance,
                   compute_covariance, compute_raw_data_covariance,
-                  whiten_evoked)
+                  compute_raw_covariance, whiten_evoked, make_ad_hoc_cov)
 from .event import (read_events, write_events, find_events, merge_events,
                     pick_events, make_fixed_length_events, concatenate_events,
                     find_stim_steps)
@@ -23,7 +44,7 @@ from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
                       do_forward_solution, average_forward_solutions,
                       write_forward_solution, make_forward_solution,
                       convert_forward_solution, make_field_map)
-from .source_estimate import (read_source_estimate,
+from .source_estimate import (read_source_estimate, MixedSourceEstimate,
                               SourceEstimate, VolSourceEstimate, morph_data,
                               morph_data_precomputed, compute_morph_matrix,
                               grade_to_tris, grade_to_vertices,
@@ -34,48 +55,44 @@ from .source_estimate import (read_source_estimate,
                               spatio_temporal_tris_connectivity,
                               spatio_temporal_dist_connectivity,
                               save_stc_as_volume, extract_label_time_course)
-from .surface import (read_bem_surfaces, read_surface, write_bem_surface,
-                      write_surface, decimate_surface, read_morph_map,
-                      read_bem_solution, get_head_surf,
-                      get_meg_helmet_surf)
+from .surface import (read_surface, write_surface, decimate_surface,
+                      read_morph_map, get_head_surf, get_meg_helmet_surf)
 from .source_space import (read_source_spaces, vertex_to_mni,
                            write_source_spaces, setup_source_space,
-                           setup_volume_source_space,
-                           add_source_space_distances)
+                           setup_volume_source_space, SourceSpaces,
+                           add_source_space_distances, morph_source_spaces,
+                           get_volume_labels_from_aseg)
 from .epochs import Epochs, EpochsArray, read_epochs
-from .evoked import (Evoked, EvokedArray, read_evoked, write_evoked,
-                     read_evokeds, write_evokeds)
-from .label import (label_time_courses, read_label, label_sign_flip,
+from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds,
+                     grand_average, combine_evoked)
+from .label import (read_label, label_sign_flip,
                     write_label, stc_to_label, grow_labels, Label, split_label,
-                    BiHemiLabel, labels_from_parc, parc_from_labels,
-                    read_labels_from_annot, write_labels_to_annot)
+                    BiHemiLabel, read_labels_from_annot, write_labels_to_annot)
 from .misc import parse_config, read_reject_parameters
 from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
                     scale_source_space)
-from .transforms import (transform_coordinates, read_trans, write_trans,
-                         transform_surface_to)
+from .transforms import (read_trans, write_trans,
+                         transform_surface_to, Transform)
 from .proj import (read_proj, write_proj, compute_proj_epochs,
                    compute_proj_evoked, compute_proj_raw, sensitivity_map)
 from .selection import read_selection
-from .dipole import read_dip
-from .layouts.layout import find_layout
-from .channels import (equalize_channels, rename_channels,
-                       read_ch_connectivity)
+from .dipole import read_dipole, Dipole, fit_dipole
+from .channels import equalize_channels, rename_channels, find_layout
 
 from . import beamformer
+from . import channels
+from . import chpi
+from . import commands
 from . import connectivity
 from . import coreg
 from . import cuda
 from . import datasets
 from . import epochs
 from . import externals
-from . import fiff  # XXX : to be deprecated in 0.9
 from . import io
 from . import filter
 from . import gui
-from . import layouts
 from . import minimum_norm
-from . import mixed_norm
 from . import preprocessing
 from . import simulation
 from . import stats
@@ -87,7 +104,3 @@ from . import realtime
 # initialize logging
 set_log_level(None, False)
 set_log_file()
-
-# initialize CUDA
-if get_config('MNE_USE_CUDA', 'false').lower() == 'true':
-    cuda.init_cuda()
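An aside on the new version string: the point of the PEP 440 dev marker is
that '0.10.dev0' orders before the final '0.10' release, so packaging tools
resolve upgrades correctly. This can be checked with pkg_resources (shipped
with setuptools; shown purely as an illustration):

    from pkg_resources import parse_version

    assert parse_version('0.10.dev0') < parse_version('0.10')
    assert parse_version('0.10') < parse_version('0.10.1')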
diff --git a/mne/_hdf5.py b/mne/_hdf5.py
deleted file mode 100644
index 738ec3e..0000000
--- a/mne/_hdf5.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# -*- coding: utf-8 -*-
-# Authors: Eric Larson <larson.eric.d at gmail.com>
-#
-# License: BSD (3-clause)
-
-import numpy as np
-from os import path as op
-
-from .utils import _check_pytables
-from .externals.six import string_types, text_type
-
-
-##############################################################################
-# WRITE
-
-def write_hdf5(fname, data, overwrite=False):
-    """Write python object to HDF5 format using Pytables
-
-    Parameters
-    ----------
-    fname : str
-        Filename to use.
-    data : object
-        Object to write. Can be of any of these types:
-            {ndarray, dict, list, tuple, int, float, str}
-        Note that dict objects must only have ``str`` keys.
-    overwrite : bool
-        If True, overwrite file (if it exists).
-    """
-    tb = _check_pytables()
-    if op.isfile(fname) and not overwrite:
-        raise IOError('file "%s" exists, use overwrite=True to overwrite'
-                      % fname)
-    o_f = tb.open_file if hasattr(tb, 'open_file') else tb.openFile
-    with o_f(fname, mode='w') as fid:
-        if hasattr(fid, 'create_group'):
-            c_g = fid.create_group
-            c_t = fid.create_table
-            c_c_a = fid.create_carray
-        else:
-            c_g = fid.createGroup
-            c_t = fid.createTable
-            c_c_a = fid.createCArray
-        filters = tb.Filters(complib='zlib', complevel=5)
-        write_params = (c_g, c_t, c_c_a, filters)
-        _triage_write('mnepython', data, fid.root, *write_params)
-
-
-def _triage_write(key, value, root, *write_params):
-    tb = _check_pytables()
-    create_group, create_table, create_c_array, filters = write_params
-    if isinstance(value, dict):
-        sub_root = create_group(root, key, 'dict')
-        for key, sub_value in value.items():
-            if not isinstance(key, string_types):
-                raise TypeError('All dict keys must be strings')
-            _triage_write('key_{0}'.format(key), sub_value, sub_root,
-                          *write_params)
-    elif isinstance(value, (list, tuple)):
-        title = 'list' if isinstance(value, list) else 'tuple'
-        sub_root = create_group(root, key, title)
-        for vi, sub_value in enumerate(value):
-            _triage_write('idx_{0}'.format(vi), sub_value, sub_root,
-                          *write_params)
-    elif isinstance(value, type(None)):
-        atom = tb.BoolAtom()
-        s = create_c_array(root, key, atom, (1,), title='None',
-                           filters=filters)
-        s[:] = False
-    elif isinstance(value, (int, float)):
-        if isinstance(value, int):
-            title = 'int'
-        else:  # isinstance(value, float):
-            title = 'float'
-        value = np.atleast_1d(value)
-        atom = tb.Atom.from_dtype(value.dtype)
-        s = create_c_array(root, key, atom, (1,),
-                           title=title, filters=filters)
-        s[:] = value
-    elif isinstance(value, string_types):
-        atom = tb.UInt8Atom()
-        if isinstance(value, text_type):  # unicode
-            value = np.fromstring(value.encode('utf-8'), np.uint8)
-            title = 'unicode'
-        else:
-            value = np.fromstring(value.encode('ASCII'), np.uint8)
-            title = 'ascii'
-        s = create_c_array(root, key, atom, (len(value),), title=title,
-                           filters=filters)
-        s[:] = value
-    elif isinstance(value, np.ndarray):
-        atom = tb.Atom.from_dtype(value.dtype)
-        s = create_c_array(root, key, atom, value.shape,
-                           title='ndarray', filters=filters)
-        s[:] = value
-    else:
-        raise TypeError('unsupported type %s' % type(value))
-
-
-##############################################################################
-# READ
-
-def read_hdf5(fname):
-    """Read python object from HDF5 format using Pytables
-
-    Parameters
-    ----------
-    fname : str
-        File to load.
-
-    Returns
-    -------
-    data : object
-        The loaded data. Can be of any type supported by ``write_hdf5``.
-    """
-    tb = _check_pytables()
-    if not op.isfile(fname):
-        raise IOError('file "%s" not found' % fname)
-    o_f = tb.open_file if hasattr(tb, 'open_file') else tb.openFile
-    with o_f(fname, mode='r') as fid:
-        if not hasattr(fid.root, 'mnepython'):
-            raise TypeError('no mne-python data found')
-        data = _triage_read(fid.root.mnepython)
-    return data
-
-
-def _triage_read(node):
-    tb = _check_pytables()
-    type_str = node._v_title
-    if isinstance(node, tb.Group):
-        if type_str == 'dict':
-            data = dict()
-            for subnode in node:
-                key = subnode._v_name[4:]  # cut off "idx_" or "key_" prefix
-                data[key] = _triage_read(subnode)
-        elif type_str in ['list', 'tuple']:
-            data = list()
-            ii = 0
-            while True:
-                subnode = getattr(node, 'idx_{0}'.format(ii), None)
-                if subnode is None:
-                    break
-                data.append(_triage_read(subnode))
-                ii += 1
-            assert len(data) == ii
-            data = tuple(data) if type_str == 'tuple' else data
-            return data
-        else:
-            raise NotImplementedError('Unknown group type: {0}'
-                                      ''.format(type_str))
-    elif type_str == 'ndarray':
-        data = np.array(node)
-    elif type_str in ('int', 'float'):
-        if type_str == 'int':
-            cast = int
-        else:  # type_str == 'float':
-            cast = float
-        data = cast(np.array(node)[0])
-    elif type_str in ('unicode', 'ascii'):
-        decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
-        cast = text_type if type_str == 'unicode' else str
-        data = cast(np.array(node).tostring().decode(decoder))
-    elif type_str == 'None':
-        data = None
-    else:
-        raise TypeError('Unknown node type: {0}'.format(type_str))
-    return data
diff --git a/mne/baseline.py b/mne/baseline.py
index 6bd204d..7436587 100644
--- a/mne/baseline.py
+++ b/mne/baseline.py
@@ -29,11 +29,13 @@ def rescale(data, times, baseline, mode, verbose=None, copy=True):
         and if b is None then b is set to the end of the interval.
         If baseline is equal to (None, None) the whole time
         interval is used. If None, no correction is applied.
-    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | 'zlogratio'
         Do baseline correction with ratio (power is divided by mean
         power during baseline) or zscore (power is divided by standard
         deviation of power during baseline after subtracting the mean,
         power = [power - mean(power_baseline)] / std(power_baseline)).
+        logratio is the same as mean but in log-scale, and zlogratio is
+        the same as zscore but the data are rendered in log-scale first.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     copy : bool
@@ -47,9 +49,10 @@ def rescale(data, times, baseline, mode, verbose=None, copy=True):
     if copy:
         data = data.copy()
 
-    valid_modes = ['logratio', 'ratio', 'zscore', 'mean', 'percent']
+    valid_modes = ('logratio', 'ratio', 'zscore', 'mean', 'percent',
+                   'zlogratio')
     if mode not in valid_modes:
-        raise Exception('mode should be any of : %s' % valid_modes)
+        raise Exception('mode should be any of : %s' % (valid_modes, ))
 
     if baseline is not None:
         logger.info("Applying baseline correction ... (mode: %s)" % mode)
@@ -82,6 +85,11 @@ def rescale(data, times, baseline, mode, verbose=None, copy=True):
         elif mode == 'percent':
             data -= mean
             data /= mean
+        elif mode == 'zlogratio':
+            data /= mean
+            data = np.log10(data)
+            std = np.std(data[..., imin:imax], axis=-1)[..., None]
+            data /= std
 
     else:
         logger.info("No baseline correction applied...")
diff --git a/mne/beamformer/__init__.py b/mne/beamformer/__init__.py
index ad8b9e0..75ea807 100644
--- a/mne/beamformer/__init__.py
+++ b/mne/beamformer/__init__.py
@@ -3,3 +3,4 @@
 
 from ._lcmv import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
 from ._dics import dics, dics_epochs, dics_source_power, tf_dics
+from ._rap_music import rap_music
diff --git a/mne/beamformer/_dics.py b/mne/beamformer/_dics.py
index b83ae1b..3f50e32 100644
--- a/mne/beamformer/_dics.py
+++ b/mne/beamformer/_dics.py
@@ -12,12 +12,11 @@ import numpy as np
 from scipy import linalg
 
 from ..utils import logger, verbose
-from ..io.pick import pick_types
 from ..forward import _subject_from_forward
-from ..minimum_norm.inverse import combine_xyz
-from ..source_estimate import SourceEstimate
+from ..minimum_norm.inverse import combine_xyz, _check_reference
+from ..source_estimate import _make_stc
 from ..time_frequency import CrossSpectralDensity, compute_epochs_csd
-from ._lcmv import _prepare_beamformer_input
+from ._lcmv import _prepare_beamformer_input, _setup_picks
 from ..externals import six
 
 
@@ -60,11 +59,11 @@ def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
 
     Returns
     -------
-    stc : SourceEstimate (or list of SourceEstimate)
-        Source time courses.
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses
     """
 
-    is_free_ori, picks, _, proj, vertno, G =\
+    is_free_ori, _, proj, vertno, G =\
         _prepare_beamformer_input(info, forward, label, picks, pick_ori)
 
     Cm = data_csd.data
@@ -134,8 +133,8 @@ def _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg,
         tstep = 1.0 / info['sfreq']
         if np.iscomplexobj(sol):
             sol = np.abs(sol)  # XXX : STC cannot contain (yet?) complex values
-        yield SourceEstimate(sol, vertices=vertno, tmin=tmin, tstep=tstep,
-                             subject=subject)
+        yield _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
+                        subject=subject)
 
     logger.info('[done]')
 
@@ -157,7 +156,7 @@ def dics(evoked, forward, noise_csd, data_csd, reg=0.01, label=None,
 
     Parameters
     ----------
-    evoked : Evooked
+    evoked : Evoked
         Evoked data.
     forward : dict
         Forward operator.
@@ -177,21 +176,29 @@ def dics(evoked, forward, noise_csd, data_csd, reg=0.01, label=None,
 
     Returns
     -------
-    stc : SourceEstimate
+    stc : SourceEstimate | VolSourceEstimate
         Source time courses
 
+    See Also
+    --------
+    dics_epochs
+
     Notes
     -----
     The original reference is:
     Gross et al. Dynamic imaging of coherent sources: Studying neural
     interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
     """
+    _check_reference(evoked)
     info = evoked.info
     data = evoked.data
     tmin = evoked.times[0]
 
+    picks = _setup_picks(picks=None, info=info, forward=forward)
+    data = data[picks]
+
     stc = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
-                      label=label, pick_ori=pick_ori)
+                      label=label, pick_ori=pick_ori, picks=picks)
     return six.advance_iterator(stc)
 
 
@@ -235,26 +242,29 @@ def dics_epochs(epochs, forward, noise_csd, data_csd, reg=0.01, label=None,
 
     Returns
     -------
-    stc: list | generator of SourceEstimate
+    stc: list | generator of SourceEstimate | VolSourceEstimate
         The source estimates for all epochs
 
+    See Also
+    --------
+    dics
+
     Notes
     -----
     The original reference is:
     Gross et al. Dynamic imaging of coherent sources: Studying neural
     interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
     """
+    _check_reference(epochs)
 
     info = epochs.info
     tmin = epochs.times[0]
 
-    # use only the good data channels
-    picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
-                       exclude='bads')
+    picks = _setup_picks(picks=None, info=info, forward=forward)
     data = epochs.get_data()[:, picks, :]
 
     stcs = _apply_dics(data, info, tmin, forward, noise_csd, data_csd, reg=reg,
-                       label=label, pick_ori=pick_ori)
+                       label=label, pick_ori=pick_ori, picks=picks)
 
     if not return_generator:
         stcs = list(stcs)
@@ -298,7 +308,7 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
 
     Returns
     -------
-    stc : SourceEstimate
+    stc : SourceEstimate | VolSourceEstimate
         Source power with frequency instead of time.
 
     Notes
@@ -314,9 +324,11 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
     if isinstance(noise_csds, CrossSpectralDensity):
         noise_csds = [noise_csds]
 
-    csd_shapes = lambda x: tuple(c.data.shape for c in x)
+    def csd_shapes(x):
+        return tuple(c.data.shape for c in x)
+
     if (csd_shapes(data_csds) != csd_shapes(noise_csds) or
-       any([len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds]])):
+       any(len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds])):
         raise ValueError('One noise CSD matrix should be provided for each '
                          'data CSD matrix and vice versa. All CSD matrices '
                          'should have identical shape.')
@@ -338,7 +350,7 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
     if len(frequencies) > 2:
         fstep = []
         for i in range(len(frequencies) - 1):
-            fstep.append(frequencies[i+1] - frequencies[i])
+            fstep.append(frequencies[i + 1] - frequencies[i])
         if not np.allclose(fstep, np.mean(fstep), 1e-5):
             warnings.warn('Uneven frequency spacing in CSD object, '
                           'frequencies in the resulting stc file will be '
@@ -349,8 +361,10 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
     else:
         fstep = 1  # dummy value
 
-    is_free_ori, picks, _, proj, vertno, G =\
-        _prepare_beamformer_input(info, forward, label, picks=None,
+    picks = _setup_picks(picks=None, info=info, forward=forward)
+
+    is_free_ori, _, proj, vertno, G =\
+        _prepare_beamformer_input(info, forward, label, picks=picks,
                                   pick_ori=pick_ori)
 
     n_orient = 3 if is_free_ori else 1
@@ -401,8 +415,8 @@ def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
     logger.info('[done]')
 
     subject = _subject_from_forward(forward)
-    return SourceEstimate(source_power, vertices=vertno, tmin=fmin / 1000.,
-                          tstep=fstep / 1000., subject=subject)
+    return _make_stc(source_power, vertices=vertno, tmin=fmin / 1000.,
+                     tstep=fstep / 1000., subject=subject)
 
 
 @verbose
@@ -446,6 +460,8 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
         tf source grid.
     mode : str
         Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
+    n_ffts : list | None
+        FFT lengths to use for each frequency bin.
     mt_bandwidths : list of float
         The bandwidths of the multitaper windowing function in Hz. Only used in
         'multitaper' mode. One value should be provided for each frequency bin.
@@ -467,7 +483,7 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
 
     Returns
     -------
-    stcs : list of SourceEstimate
+    stcs : list of SourceEstimate | VolSourceEstimate
         Source power at each time window. One SourceEstimate object is returned
         for each frequency bin.
 
@@ -481,6 +497,7 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
     NOTE : Dalal et al. used a synthetic aperture magnetometry beamformer (SAM)
     in each time-frequency window instead of DICS.
     """
+    _check_reference(epochs)
 
     if pick_ori not in [None, 'normal']:
         raise ValueError('Unrecognized orientation option in pick_ori, '
@@ -587,8 +604,8 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
     # Creating stc objects containing all time points for each frequency bin
     stcs = []
     for i_freq, _ in enumerate(freq_bins):
-        stc = SourceEstimate(sol_final[i_freq, :, :].T, vertices=stc.vertno,
-                             tmin=tmin, tstep=tstep, subject=stc.subject)
+        stc = _make_stc(sol_final[i_freq, :, :].T, vertices=stc.vertices,
+                        tmin=tmin, tstep=tstep, subject=stc.subject)
         stcs.append(stc)
 
     return stcs
diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py
index 00a895a..4e2b2fe 100644
--- a/mne/beamformer/_lcmv.py
+++ b/mne/beamformer/_lcmv.py
@@ -13,9 +13,10 @@ from scipy import linalg
 
 from ..io.constants import FIFF
 from ..io.proj import make_projector
-from ..io.pick import pick_types, pick_channels_forward, pick_channels_cov
+from ..io.pick import (
+    pick_types, pick_channels_forward, pick_channels_cov, pick_info)
 from ..forward import _subject_from_forward
-from ..minimum_norm.inverse import _get_vertno, combine_xyz
+from ..minimum_norm.inverse import _get_vertno, combine_xyz, _check_reference
 from ..cov import compute_whitener, compute_covariance
 from ..source_estimate import _make_stc, SourceEstimate
 from ..source_space import label_src_vertno_sel
@@ -24,9 +25,37 @@ from .. import Epochs
 from ..externals import six
 
 
+def _setup_picks(picks, info, forward, noise_cov=None):
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+
+    ok_ch_names = set([c['ch_name'] for c in forward['info']['chs']])
+    if noise_cov is not None:
+        ok_ch_names |= set(noise_cov.ch_names)
+
+    if noise_cov is not None and set(info['bads']) != set(noise_cov['bads']):
+        logger.info('info["bads"] and noise_cov["bads"] do not match, '
+                    'excluding bad channels from both')
+
+    bads = set(info['bads'])
+    if noise_cov is not None:
+        bads |= set(noise_cov['bads'])
+
+    ok_ch_names -= bads
+
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
+    ch_names = [c for c in ch_names if c in ok_ch_names]
+
+    picks = [info['ch_names'].index(k) for k in ch_names if k in
+             info['ch_names']]
+    return picks
+
+
 @verbose
 def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                label=None, picks=None, pick_ori=None, verbose=None):
+                label=None, picks=None, pick_ori=None, rank=None,
+                verbose=None):
     """ LCMV beamformer for evoked data, single epochs, and raw data
 
     Parameters
@@ -56,6 +85,11 @@ def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
         If 'normal', rather than pooling the orientations by taking the norm,
         only the radial component is kept. If 'max-power', the source
         orientation that maximizes output source power is chosen.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -64,12 +98,12 @@ def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
     stc : SourceEstimate | VolSourceEstimate (or list thereof)
         Source time courses.
     """
-
-    is_free_ori, picks, ch_names, proj, vertno, G =\
-        _prepare_beamformer_input(info, forward, label, picks, pick_ori)
+    is_free_ori, ch_names, proj, vertno, G = (
+        _prepare_beamformer_input(
+            info, forward, label, picks, pick_ori))
 
     # Handle whitening + data covariance
-    whitener, _ = compute_whitener(noise_cov, info, picks)
+    whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
 
     # whiten the leadfield
     G = np.dot(whitener, G)
@@ -205,13 +239,8 @@ def _prepare_beamformer_input(info, forward, label, picks, pick_ori):
                          'forward operator with a surface-based source space '
                          'is used.')
 
-    if picks is None:
-        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
-                           exclude='bads')
-
-    ch_names = [info['ch_names'][k] for k in picks]
-
     # Restrict forward solution to selected channels
+    ch_names = [info['chs'][k]['ch_name'] for k in picks]
     forward = pick_channels_forward(forward, include=ch_names)
 
     # Get gain matrix (forward operator)
@@ -233,12 +262,12 @@ def _prepare_beamformer_input(info, forward, label, picks, pick_ori):
     if info['projs']:
         G = np.dot(proj, G)
 
-    return is_free_ori, picks, ch_names, proj, vertno, G
+    return is_free_ori, ch_names, proj, vertno, G
 
 
 @verbose
 def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
-         pick_ori=None, verbose=None):
+         pick_ori=None, picks=None, rank=None, verbose=None):
     """Linearly Constrained Minimum Variance (LCMV) beamformer.
 
     Compute Linearly Constrained Minimum Variance (LCMV) beamformer
@@ -265,6 +294,14 @@ def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
         If 'normal', rather than pooling the orientations by taking the norm,
         only the radial component is kept. If 'max-power', the source
         orientation that maximizes output source power is chosen.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -273,6 +310,10 @@ def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
     stc : SourceEstimate | VolSourceEstimate
         Source time courses
 
+    See Also
+    --------
+    lcmv_raw, lcmv_epochs
+
     Notes
     -----
     The original reference is:
@@ -285,20 +326,28 @@ def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
     beamformers for neuromagnetic source reconstruction.
     Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
     """
+    _check_reference(evoked)
 
     info = evoked.info
     data = evoked.data
     tmin = evoked.times[0]
 
-    stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                      label, pick_ori=pick_ori)
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data = data[picks]
+
+    stc = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
 
     return six.advance_iterator(stc)
 
 
 @verbose
 def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
-                pick_ori=None, return_generator=False, verbose=None):
+                pick_ori=None, return_generator=False, picks=None, rank=None,
+                verbose=None):
     """Linearly Constrained Minimum Variance (LCMV) beamformer.
 
     Compute Linearly Constrained Minimum Variance (LCMV) beamformer
@@ -328,6 +377,14 @@ def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
     return_generator : bool
         Return a generator object instead of a list. This allows iterating
         over the stcs without having to keep them all in memory.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -336,6 +393,10 @@ def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
     stc: list | generator of (SourceEstimate | VolSourceEstimate)
         The source estimates for all epochs
 
+    See Also
+    --------
+    lcmv_raw, lcmv
+
     Notes
     -----
     The original reference is:
@@ -348,17 +409,19 @@ def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
     beamformers for neuromagnetic source reconstruction.
     Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
     """
+    _check_reference(epochs)
 
     info = epochs.info
     tmin = epochs.times[0]
 
-    # use only the good data channels
-    picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
-                       exclude='bads')
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
     data = epochs.get_data()[:, picks, :]
 
-    stcs = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                       label, pick_ori=pick_ori)
+    stcs = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
 
     if not return_generator:
         stcs = [s for s in stcs]
@@ -368,7 +431,8 @@ def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
 
 @verbose
 def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
-             start=None, stop=None, picks=None, pick_ori=None, verbose=None):
+             start=None, stop=None, picks=None, pick_ori=None, rank=None,
+             verbose=None):
     """Linearly Constrained Minimum Variance (LCMV) beamformer.
 
     Compute Linearly Constrained Minimum Variance (LCMV) beamformer
@@ -396,12 +460,17 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
     stop : int
         Index of first time sample not to include (index not time is seconds).
     picks : array-like of int
-        Channel indices in raw to use for beamforming (if None all channels
+        Channel indices to use for beamforming (if None all channels
         are used except bad channels).
     pick_ori : None | 'normal' | 'max-power'
         If 'normal', rather than pooling the orientations by taking the norm,
         only the radial component is kept. If 'max-power', the source
         orientation that maximizes output source power is chosen.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -410,6 +479,10 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
     stc : SourceEstimate | VolSourceEstimate
         Source time courses
 
+    See Also
+    --------
+    lcmv, lcmv_epochs
+
     Notes
     -----
     The original reference is:
@@ -422,25 +495,27 @@ def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
     beamformers for neuromagnetic source reconstruction.
     Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
     """
+    _check_reference(raw)
 
     info = raw.info
 
-    if picks is None:
-        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
-                           exclude='bads')
+    picks = _setup_picks(picks, info, forward, noise_cov)
 
     data, times = raw[picks, start:stop]
     tmin = times[0]
 
-    stc = _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
-                      label, picks, pick_ori)
+    stc = _apply_lcmv(
+        data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
+        data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
+        pick_ori=pick_ori)
 
     return six.advance_iterator(stc)
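The raw-data variant gains the same ``rank`` handling; note that ``start``
and ``stop`` are sample indices, not seconds (continuing the hypothetical
inputs above):

    from mne.beamformer import lcmv_raw

    raw = mne.io.Raw('sample_raw.fif', preload=True)  # hypothetical file
    stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01,
                   start=0, stop=600, rank=dict(meg=64))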
 
 
 @verbose
 def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
-                       label=None, picks=None, pick_ori=None, verbose=None):
+                       label=None, picks=None, pick_ori=None,
+                       rank=None, verbose=None):
     """Linearly Constrained Minimum Variance (LCMV) beamformer.
 
     Calculate source power in a time window based on the provided data
@@ -471,6 +546,11 @@ def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
     pick_ori : None | 'normal'
         If 'normal', rather than pooling the orientations by taking the norm,
         only the radial component is kept.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -487,12 +567,19 @@ def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
     constrained minimum variance spatial filtering.
     Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
     """
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
 
-    is_free_ori, picks, ch_names, proj, vertno, G =\
-        _prepare_beamformer_input(info, forward, label, picks, pick_ori)
+    is_free_ori, ch_names, proj, vertno, G =\
+        _prepare_beamformer_input(
+            info, forward, label, picks, pick_ori)
 
     # Handle whitening
-    whitener, _ = compute_whitener(noise_cov, info, picks)
+    info = pick_info(
+        info, [info['ch_names'].index(k) for k in ch_names
+               if k in info['ch_names']])
+    whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
 
     # whiten the leadfield
     G = np.dot(whitener, G)
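For reference, the whitener computed here is the linear map that turns the
noise covariance into the identity on the retained channels; a
self-contained numpy sketch of the principle (not mne's exact
implementation, which also handles projections and rank deficiency):

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(5, 1000)                  # five channels of noise
    C = np.cov(X)                           # empirical noise covariance
    eigval, eigvec = np.linalg.eigh(C)
    W = (eigvec / np.sqrt(eigval)).T        # whitener W = D**-0.5 * E.T
    print(np.allclose(W.dot(C).dot(W.T), np.eye(5)))  # True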
@@ -549,7 +636,7 @@ def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
 @verbose
 def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
             freq_bins, subtract_evoked=False, reg=0.01, label=None,
-            pick_ori=None, n_jobs=1, verbose=None):
+            pick_ori=None, n_jobs=1, picks=None, rank=None, verbose=None):
     """5D time-frequency beamforming based on LCMV.
 
     Calculate source power in time-frequency windows using a spatial filter
@@ -594,6 +681,14 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
     n_jobs : int | str
         Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
         is installed properly and CUDA is initialized.
+    picks : array-like of int
+        Channel indices to use for beamforming (if None all channels
+        are used except bad channels).
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -610,6 +705,7 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
     time-frequency dynamics of cortical activity.
     NeuroImage (2008) vol. 40 (4) pp. 1686-1700
     """
+    _check_reference(epochs)
 
     if pick_ori not in [None, 'normal']:
         raise ValueError('Unrecognized orientation option in pick_ori, '
@@ -624,13 +720,17 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
                          'window lengths')
 
     # Extract raw object from the epochs object
-    raw = epochs.raw
+    raw = epochs._raw
     if raw is None:
         raise ValueError('The provided epochs object does not contain the '
                          'underlying raw object. Please use preload=False '
                          'when constructing the epochs object')
+
+    picks = _setup_picks(picks, epochs.info, forward, noise_covs[0])
+    ch_names = [epochs.ch_names[k] for k in picks]
+
     # Use picks from epochs for picking channels in the raw object
-    raw_picks = [raw.ch_names.index(c) for c in epochs.ch_names]
+    raw_picks = [raw.ch_names.index(c) for c in ch_names]
 
     # Make sure epochs.events contains only good events:
     epochs.drop_bad_epochs()
@@ -714,7 +814,7 @@ def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
     # Creating stc objects containing all time points for each frequency bin
     stcs = []
     for i_freq, _ in enumerate(freq_bins):
-        stc = SourceEstimate(sol_final[i_freq, :, :].T, vertices=stc.vertno,
+        stc = SourceEstimate(sol_final[i_freq, :, :].T, vertices=stc.vertices,
                              tmin=tmin, tstep=tstep, subject=stc.subject)
         stcs.append(stc)
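A sketch of the 5D beamformer call with the new arguments; the frequency
bins, window lengths, and per-band covariances are illustrative, and
``epochs`` must have been created with ``preload=False`` so the underlying
raw object is still attached (continuing the earlier sketch):

    from mne.beamformer import tf_lcmv

    freq_bins = [(4, 12), (30, 55)]  # Hz
    win_lengths = [0.3, 0.2]         # seconds, one per frequency bin
    # In practice, estimate one noise covariance per band-passed signal;
    # a single placeholder estimate is reused here for brevity.
    noise_covs = [mne.compute_covariance(epochs, tmin=None, tmax=0.0)
                  for _ in freq_bins]
    stcs = tf_lcmv(epochs, forward, noise_covs, tmin=0.0, tmax=0.5,
                   tstep=0.05, win_lengths=win_lengths, freq_bins=freq_bins,
                   reg=0.01, picks=picks, rank=dict(meg=64))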
 
diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py
new file mode 100644
index 0000000..5e96da7
--- /dev/null
+++ b/mne/beamformer/_rap_music.py
@@ -0,0 +1,274 @@
+"""Compute a Recursively Applied and Projected MUltiple
+Signal Classification (RAP-MUSIC).
+"""
+
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy import linalg
+
+from ..io.pick import pick_channels_evoked
+from ..cov import compute_whitener
+from ..utils import logger, verbose
+from ..dipole import Dipole
+from ._lcmv import _prepare_beamformer_input, _setup_picks
+
+
+def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2,
+                     picks=None, return_explained_data=False):
+    """RAP-MUSIC for evoked data
+
+    Parameters
+    ----------
+    data : array, shape (n_channels, n_times)
+        Evoked data.
+    info : dict
+        Measurement info.
+    times : array
+        Times.
+    forward : instance of Forward
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    n_dipoles : int
+        The number of dipoles to estimate. The default value is 2.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    return_explained_data : bool
+        If True, the explained data is returned as an array.
+
+    Returns
+    -------
+    dipoles : list of instances of Dipole
+        The dipole fits.
+    explained_data : array | None
+        Data explained by the dipoles using a least-squares fit with the
+        selected active dipoles and their estimated orientations.
+        Computed only if return_explained_data is True.
+    """
+
+    is_free_ori, ch_names, proj, vertno, G = _prepare_beamformer_input(
+        info, forward, label=None, picks=picks, pick_ori=None)
+
+    gain = G.copy()
+
+    # Handle whitening + data covariance
+    whitener, _ = compute_whitener(noise_cov, info, picks)
+    if info['projs']:
+        whitener = np.dot(whitener, proj)
+
+    # whiten the leadfield and the data
+    G = np.dot(whitener, G)
+    data = np.dot(whitener, data)
+
+    eig_values, eig_vectors = linalg.eigh(np.dot(data, data.T))
+    phi_sig = eig_vectors[:, -n_dipoles:]
+
+    n_orient = 3 if is_free_ori else 1
+    n_channels = G.shape[0]
+    A = np.empty((n_channels, n_dipoles))
+    gain_dip = np.empty((n_channels, n_dipoles))
+    oris = np.empty((n_dipoles, 3))
+    poss = np.empty((n_dipoles, 3))
+
+    G_proj = G.copy()
+    phi_sig_proj = phi_sig.copy()
+
+    for k in range(n_dipoles):
+        subcorr_max = -1.
+        for i_source in range(G.shape[1] // n_orient):
+            idx_k = slice(n_orient * i_source, n_orient * (i_source + 1))
+            Gk = G_proj[:, idx_k]
+            if n_orient == 3:
+                Gk = np.dot(Gk, forward['source_nn'][idx_k])
+
+            subcorr, ori = _compute_subcorr(Gk, phi_sig_proj)
+            if subcorr > subcorr_max:
+                subcorr_max = subcorr
+                source_idx = i_source
+                source_ori = ori
+                if n_orient == 3 and source_ori[-1] < 0:
+                    # make sure ori is relative to surface ori
+                    source_ori *= -1  # XXX
+
+                source_pos = forward['source_rr'][i_source]
+                if n_orient == 1:
+                    source_ori = forward['source_nn'][i_source]
+
+        idx_k = slice(n_orient * source_idx, n_orient * (source_idx + 1))
+        Ak = G[:, idx_k]
+        if n_orient == 3:
+            Ak = np.dot(Ak, np.dot(forward['source_nn'][idx_k], source_ori))
+
+        A[:, k] = Ak.ravel()
+
+        if return_explained_data:
+            gain_k = gain[:, idx_k]
+            if n_orient == 3:
+                gain_k = np.dot(gain_k,
+                                np.dot(forward['source_nn'][idx_k],
+                                       source_ori))
+            gain_dip[:, k] = gain_k.ravel()
+
+        oris[k] = source_ori
+        poss[k] = source_pos
+
+        logger.info("source %s found: p = %s" % (k + 1, source_idx))
+        if n_orient == 3:
+            logger.info("ori = %s %s %s" % tuple(oris[k]))
+
+        projection = _compute_proj(A[:, :k + 1])
+        G_proj = np.dot(projection, G)
+        phi_sig_proj = np.dot(projection, phi_sig)
+
+    sol = linalg.lstsq(A, data)[0]
+
+    gof, explained_data = [], None
+    if return_explained_data:
+        explained_data = np.dot(gain_dip, sol)
+        gof = (linalg.norm(np.dot(whitener, explained_data)) /
+               linalg.norm(data))
+
+    return _make_dipoles(times, poss,
+                         oris, sol, gof), explained_data
+
+
+def _make_dipoles(times, poss, oris, sol, gof):
+    """Instanciates a list of Dipoles
+
+    Parameters
+    ----------
+    times : array, shape (n_times,)
+        The time instants.
+    poss : array, shape (n_dipoles, 3)
+        The dipoles' positions.
+    oris : array, shape (n_dipoles, 3)
+        The dipoles' orientations.
+    sol : array, shape (n_dipoles, n_times)
+        The dipoles' amplitudes over time.
+    gof : array, shape (n_times,)
+        The goodness of fit of the dipoles.
+        Shared between all dipoles.
+
+    Returns
+    -------
+    dipoles : list
+        The list of Dipole instances.
+    """
+    amplitude = sol * 1e9
+    oris = np.array(oris)
+
+    dipoles = []
+    for i_dip in range(poss.shape[0]):
+        i_pos = poss[i_dip][np.newaxis, :].repeat(len(times), axis=0)
+        i_ori = oris[i_dip][np.newaxis, :].repeat(len(times), axis=0)
+        dipoles.append(Dipole(times, i_pos, amplitude[i_dip],
+                              i_ori, gof))
+
+    return dipoles
+
+
+def _compute_subcorr(G, phi_sig):
+    """ Compute the subspace correlation
+    """
+    Ug, Sg, Vg = linalg.svd(G, full_matrices=False)
+    tmp = np.dot(Ug.T.conjugate(), phi_sig)
+    Uc, Sc, Vc = linalg.svd(tmp, full_matrices=False)
+    X = np.dot(np.dot(Vg.T, np.diag(1. / Sg)), Uc)  # subcorr
+    return Sc[0], X[:, 0] / linalg.norm(X[:, 0])
+
+
+def _compute_proj(A):
+    """ Compute the orthogonal projection operation for
+    a manifold vector A.
+    """
+    U, _, _ = linalg.svd(A, full_matrices=False)
+    return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate())
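The projector built by ``_compute_proj`` removes the column space of the
manifold matrix ``A``, which is what lets each RAP iteration search for the
next source outside the subspace of those already found; a quick numeric
check:

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    A = rng.randn(6, 2)  # two already-found source topographies
    U = linalg.svd(A, full_matrices=False)[0]
    P = np.identity(6) - np.dot(U, U.T.conjugate())
    print(np.allclose(np.dot(P, A), 0.0))  # True: columns of A are removed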
+
+
+@verbose
+def rap_music(evoked, forward, noise_cov, n_dipoles=5, return_residual=False,
+              picks=None, verbose=None):
+    """RAP-MUSIC source localization method.
+
+    Compute Recursively Applied and Projected MUltiple SIgnal Classification
+    (RAP-MUSIC) on evoked data.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to localize.
+    forward : instance of Forward
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    n_dipoles : int
+        The number of dipoles to look for. The default value is 5.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+    picks : array-like of int | None
+        Indices (in info) of data channels. If None, MEG and EEG data channels
+        (without bad channels) will be used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    dipoles : list of instance of Dipole
+        The dipole fits.
+    residual : instance of Evoked
+        The residual, i.e. the data not explained by the dipoles.
+        Only returned if return_residual is True.
+
+    See Also
+    --------
+    mne.fit_dipole
+
+    Notes
+    -----
+    The references are:
+
+        J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively
+        applied and projected (RAP) MUSIC. Signal Processing, IEEE Trans. 47, 2
+        (February 1999), 332-340.
+        DOI=10.1109/78.740118 http://dx.doi.org/10.1109/78.740118
+
+        Mosher, J.C.; Leahy, R.M., EEG and MEG source localization using
+        recursively applied (RAP) MUSIC, Signals, Systems and Computers, 1996.
+        pp.1201,1207 vol.2, 3-6 Nov. 1996
+        doi: 10.1109/ACSSC.1996.599135
+
+    .. versionadded:: 0.9.0
+    """
+
+    info = evoked.info
+    data = evoked.data
+    times = evoked.times
+
+    picks = _setup_picks(picks, info, forward, noise_cov)
+
+    data = data[picks]
+
+    dipoles, explained_data = _apply_rap_music(data, info, times, forward,
+                                               noise_cov, n_dipoles,
+                                               picks, return_residual)
+
+    if return_residual:
+        residual = evoked.copy()
+        selection = [info['ch_names'][p] for p in picks]
+
+        residual = pick_channels_evoked(residual,
+                                        include=selection)
+        residual.data -= explained_data
+        active_projs = [p for p in residual.info['projs'] if p['active']]
+        for p in active_projs:
+            p['active'] = False
+        residual.add_proj(active_projs, remove_existing=True)
+        residual.apply_proj()
+        return dipoles, residual
+    else:
+        return dipoles
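End to end, the public entry point might be exercised like this (the file
names and the choice of two dipoles are hypothetical):

    import mne
    from mne.beamformer import rap_music

    evoked = mne.read_evokeds('sample-ave.fif', condition=0)
    forward = mne.read_forward_solution('sample-fwd.fif')
    noise_cov = mne.read_cov('sample-cov.fif')
    dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
                                  return_residual=True)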
diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py
index 6eaf4ad..b5f48d7 100644
--- a/mne/beamformer/tests/test_dics.py
+++ b/mne/beamformer/tests/test_dics.py
@@ -8,26 +8,35 @@ import numpy as np
 from numpy.testing import assert_array_equal, assert_array_almost_equal
 
 import mne
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
 from mne.time_frequency import compute_epochs_csd
 from mne.externals.six import advance_iterator
+from mne.utils import run_tests_if_main, clean_warning_registry
 
 # Note that this is the first test file, this will apply to all subsequent
 # tests in a full nosetest:
 warnings.simplefilter("always")  # ensure we can verify expected warnings
 
-data_path = sample.data_path(download=False)
-fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
-fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
 fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis-meg-vol-7-fwd.fif')
-fname_event = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
+                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
+fname_event = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc_raw-eve.fif')
 label = 'Aud-lh'
 fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
 
+# bit of a hack to deal with old scipy/numpy throwing warnings in tests
+clean_warning_registry()
+
+
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = mne.read_forward_solution(*args, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False)
+
 
 def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
     """Read in data used in tests
@@ -37,8 +46,8 @@ def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
     raw = mne.io.Raw(fname_raw, preload=False)
     forward = mne.read_forward_solution(fname_fwd)
     if read_all_forward:
-        forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
-        forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True,
+        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
+        forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
                                                   surf_ori=True)
         forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
     else:
@@ -66,7 +75,7 @@ def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
 
     # Computing the data and noise cross-spectral density matrices
     if compute_csds:
-        data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.04,
+        data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.045,
                                       tmax=None, fmin=8, fmax=12,
                                       mt_bandwidth=72.72)
         noise_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=None,
@@ -79,7 +88,7 @@ def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
         forward_surf_ori, forward_fixed, forward_vol
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_dics():
     """Test DICS with evoked data and single trials
     """
@@ -89,17 +98,20 @@ def test_dics():
     stc = dics(evoked, forward, noise_csd=noise_csd, data_csd=data_csd,
                label=label)
 
+    stc.crop(0, None)
     stc_pow = np.sum(stc.data, axis=1)
     idx = np.argmax(stc_pow)
     max_stc = stc.data[idx]
     tmax = stc.times[np.argmax(max_stc)]
 
-    assert_true(0.09 < tmax < 0.11)
-    assert_true(10 < np.max(max_stc) < 11)
+    # Incorrect due to limited number of epochs
+    assert_true(0.04 < tmax < 0.05)
+    assert_true(10 < np.max(max_stc) < 13)
 
     # Test picking normal orientation
     stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
                       pick_ori="normal", label=label)
+    stc_normal.crop(0, None)
 
     # The amplitude of normal orientation results should always be smaller than
     # free orientation results
@@ -135,20 +147,20 @@ def test_dics():
     assert_true(len(epochs.events) == len(stcs))
 
     # Average the single trial estimates
-    stc_avg = np.zeros_like(stcs[0].data)
+    stc_avg = np.zeros_like(stc.data)
     for this_stc in stcs:
-        stc_avg += this_stc.data
+        stc_avg += this_stc.crop(0, None).data
     stc_avg /= len(stcs)
 
     idx = np.argmax(np.max(stc_avg, axis=1))
     max_stc = stc_avg[idx]
     tmax = stc.times[np.argmax(max_stc)]
 
-    assert_true(0.045 < tmax < 0.055)  # odd due to limited number of epochs
-    assert_true(17.5 < np.max(max_stc) < 18.5)
+    assert_true(0.045 < tmax < 0.06)  # incorrect due to limited # of epochs
+    assert_true(12 < np.max(max_stc) < 18.5)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_dics_source_power():
     """Test DICS source power computation
     """
@@ -162,8 +174,8 @@ def test_dics_source_power():
     max_source_power = np.max(stc_source_power.data)
 
     # TODO: Maybe these could be more directly compared to dics() results?
-    assert_true(max_source_idx == 18)
-    assert_true(1.05 < max_source_power < 1.15)
+    assert_true(max_source_idx == 0)
+    assert_true(0.5 < max_source_power < 1.15)
 
     # Test picking normal orientation and using a list of CSD matrices
     stc_normal = dics_source_power(epochs.info, forward_surf_ori,
@@ -213,7 +225,7 @@ def test_dics_source_power():
     assert len(w) == 1
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_tf_dics():
     """Test TF beamforming based on DICS
     """
@@ -237,7 +249,6 @@ def test_tf_dics():
                    freq_bins, reg=reg, label=label)
 
     assert_true(len(stcs) == len(freq_bins))
-    print(stcs[0].shape)
     assert_true(stcs[0].shape[1] == 4)
 
     # Manually calculating source power in several time windows to compare
@@ -296,3 +307,6 @@ def test_tf_dics():
                    label=label)
 
     assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
+
+
+run_tests_if_main()
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index fcb5f6d..d92c60a 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -7,28 +7,33 @@ import warnings
 
 import mne
 from mne import compute_covariance
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv
 from mne.beamformer._lcmv import _lcmv_source_power
-from mne.source_estimate import SourceEstimate, VolSourceEstimate
 from mne.externals.six import advance_iterator
+from mne.utils import run_tests_if_main, slow_test
 
 
-data_path = sample.data_path(download=False)
-fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
-fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
-fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
 fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis-meg-vol-7-fwd.fif')
-fname_event = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
+                        'sample_audvis_trunc-meg-vol-7-fwd.fif')
+fname_event = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc_raw-eve.fif')
 label = 'Aud-lh'
 fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = mne.read_forward_solution(*args, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False)
+
+
 def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
               epochs_preload=True, data_cov=True):
     """Read in data used in tests
@@ -38,10 +43,10 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
     raw = mne.io.Raw(fname_raw, preload=True)
     forward = mne.read_forward_solution(fname_fwd)
     if all_forward:
-        forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
-        forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True,
+        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
+        forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,
                                                   surf_ori=True)
-        forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
+        forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)
     else:
         forward_surf_ori = None
         forward_fixed = None
@@ -77,7 +82,8 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
     noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
                                    eeg=0.1, proj=True)
     if data_cov:
-        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
+        with warnings.catch_warnings(record=True):
+            data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
     else:
         data_cov = None
 
@@ -85,7 +91,8 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
         forward_surf_ori, forward_fixed, forward_vol
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_lcmv():
     """Test LCMV with evoked data and single trials
     """
@@ -94,32 +101,29 @@ def test_lcmv():
 
     for fwd in [forward, forward_vol]:
         stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)
-
-        if fwd is forward:
-            assert_true(isinstance(stc, SourceEstimate))
-        else:
-            assert_true(isinstance(stc, VolSourceEstimate))
+        stc.crop(0.02, None)
 
         stc_pow = np.sum(stc.data, axis=1)
         idx = np.argmax(stc_pow)
         max_stc = stc.data[idx]
         tmax = stc.times[np.argmax(max_stc)]
 
-        assert_true(0.09 < tmax < 0.105)
-        assert_true(1.9 < np.max(max_stc) < 3.)
+        assert_true(0.09 < tmax < 0.105, tmax)
+        assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))
 
         if fwd is forward:
             # Test picking normal orientation (surface source space only)
             stc_normal = lcmv(evoked, forward_surf_ori, noise_cov, data_cov,
                               reg=0.01, pick_ori="normal")
+            stc_normal.crop(0.02, None)
 
             stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
             idx = np.argmax(stc_pow)
             max_stc = stc_normal.data[idx]
             tmax = stc_normal.times[np.argmax(max_stc)]
 
-            assert_true(0.09 < tmax < 0.11)
-            assert_true(1. < np.max(max_stc) < 2.)
+            assert_true(0.04 < tmax < 0.11, tmax)
+            assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))
 
             # The amplitude of normal orientation results should always be
             # smaller than free orientation results
@@ -128,17 +132,18 @@ def test_lcmv():
         # Test picking source orientation maximizing output source power
         stc_max_power = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01,
                              pick_ori="max-power")
+        stc_max_power.crop(0.02, None)
         stc_pow = np.sum(stc_max_power.data, axis=1)
         idx = np.argmax(stc_pow)
         max_stc = stc_max_power.data[idx]
         tmax = stc.times[np.argmax(max_stc)]
 
-        assert_true(0.09 < tmax < 0.1)
-        assert_true(2. < np.max(max_stc) < 3.)
+        assert_true(0.09 < tmax < 0.11, tmax)
+        assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))
 
         # Maximum output source power orientation results should be similar to
         # free orientation results
-        assert_true((stc_max_power.data - stc.data < 0.5).all())
+        assert_true((stc_max_power.data - stc.data < 1).all())
 
     # Test if fixed forward operator is detected when picking normal or
     # max-power orientation
@@ -185,7 +190,7 @@ def test_lcmv():
     assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_lcmv_raw():
     """Test LCMV with raw data
     """
@@ -200,7 +205,7 @@ def test_lcmv_raw():
     picks = mne.pick_types(raw.info, meg=True, exclude='bads',
                            selection=left_temporal_channels)
 
-    data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)
+    data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)
 
     stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label,
                    start=start, stop=stop, picks=picks)
@@ -211,12 +216,12 @@ def test_lcmv_raw():
 
     # make sure we get an stc with vertices only in the lh
     vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]
-    assert_true(len(stc.vertno[0]) == len(np.intersect1d(vertno[0],
-                                                         label.vertices)))
-    assert_true(len(stc.vertno[1]) == 0)
+    assert_true(len(stc.vertices[0]) == len(np.intersect1d(vertno[0],
+                                                           label.vertices)))
+    assert_true(len(stc.vertices[1]) == 0)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_lcmv_source_power():
     """Test LCMV source power computation
     """
@@ -229,8 +234,8 @@ def test_lcmv_source_power():
     max_source_idx = np.argmax(stc_source_power.data)
     max_source_power = np.max(stc_source_power.data)
 
-    assert_true(max_source_idx == 24)
-    assert_true(2.2 < max_source_power < 2.4)
+    assert_true(max_source_idx == 0, max_source_idx)
+    assert_true(0.4 < max_source_power < 2.4, max_source_power)
 
     # Test picking normal orientation and using a list of CSD matrices
     stc_normal = _lcmv_source_power(epochs.info, forward_surf_ori, noise_cov,
@@ -257,12 +262,10 @@ def test_lcmv_source_power():
                   noise_cov, data_cov, pick_ori="normal")
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_tf_lcmv():
     """Test TF beamforming based on LCMV
     """
-    fname_raw = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_filt-0-40_raw.fif')
     label = mne.read_label(fname_label)
     events = mne.read_events(fname_event)
     raw = mne.io.Raw(fname_raw, preload=True)
@@ -298,7 +301,7 @@ def test_tf_lcmv():
         raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1, picks=picks)
         epochs_band = mne.Epochs(raw_band, epochs.events, epochs.event_id,
                                  tmin=tmin, tmax=tmax, baseline=None,
-                                 proj=True)
+                                 proj=True, picks=picks)
         with warnings.catch_warnings(record=True):  # not enough samples
             noise_cov = compute_covariance(epochs_band, tmin=tmin, tmax=tmin +
                                            win_length)
@@ -320,8 +323,9 @@ def test_tf_lcmv():
                                                       reg=reg, label=label)
                 source_power.append(stc_source_power.data)
 
-    stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
-                   freq_bins, reg=reg, label=label)
+    with warnings.catch_warnings(record=True):
+        stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep,
+                       win_lengths, freq_bins, reg=reg, label=label)
 
     assert_true(len(stcs) == len(freq_bins))
     assert_true(stcs[0].shape[1] == 4)
@@ -357,8 +361,9 @@ def test_tf_lcmv():
     # the underlying raw object
     epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                                   baseline=(None, 0), preload=True)
-    assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs,
-                  tmin, tmax, tstep, win_lengths, freq_bins)
+    with warnings.catch_warnings(record=True):  # not enough samples
+        assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,
+                      noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)
 
     with warnings.catch_warnings(record=True):  # not enough samples
         # Pass only one epoch to test if subtracting evoked
@@ -368,3 +373,6 @@ def test_tf_lcmv():
                        label=label)
 
     assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
+
+
+run_tests_if_main()
diff --git a/mne/beamformer/tests/test_rap_music.py b/mne/beamformer/tests/test_rap_music.py
new file mode 100644
index 0000000..ce73f0b
--- /dev/null
+++ b/mne/beamformer/tests/test_rap_music.py
@@ -0,0 +1,152 @@
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import numpy as np
+from scipy import linalg
+
+import warnings
+from nose.tools import assert_true
+
+import mne
+from mne.datasets import testing
+from mne.beamformer import rap_music
+from mne.utils import run_tests_if_main
+
+
+data_path = testing.data_path(download=False)
+fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _read_forward_solution_meg(fname_fwd, **kwargs):
+    fwd = mne.read_forward_solution(fname_fwd, **kwargs)
+    return mne.pick_types_forward(fwd, meg=True, eeg=False,
+                                  exclude=['MEG 2443'])
+
+
+def _get_data(event_id=1):
+    """Read in data used in tests
+    """
+    # Read evoked
+    evoked = mne.read_evokeds(fname_ave, event_id)
+    evoked.pick_types(meg=True, eeg=False)
+    evoked.crop(0, 0.3)
+
+    forward = mne.read_forward_solution(fname_fwd)
+
+    forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True)
+    forward_fixed = _read_forward_solution_meg(fname_fwd, force_fixed=True,
+                                               surf_ori=True)
+
+    noise_cov = mne.read_cov(fname_cov)
+
+    return evoked, noise_cov, forward, forward_surf_ori, forward_fixed
+
+
+def simu_data(evoked, forward, noise_cov, n_dipoles, times):
+    """Simulate an evoked dataset with 2 sources
+
+    One source is put in each hemisphere.
+    """
+    # Generate the two dipoles data
+    mu, sigma = 0.1, 0.005
+    s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
+                                                   (2 * sigma ** 2))
+
+    mu, sigma = 0.075, 0.008
+    s2 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
+                                                   (2 * sigma ** 2))
+    data = np.array([s1, s2]) * 1e-9
+
+    src = forward['src']
+    rng = np.random.RandomState(42)
+
+    rndi = rng.randint(len(src[0]['vertno']))
+    lh_vertno = src[0]['vertno'][[rndi]]
+
+    rndi = rng.randint(len(src[1]['vertno']))
+    rh_vertno = src[1]['vertno'][[rndi]]
+
+    vertices = [lh_vertno, rh_vertno]
+    tmin, tstep = times.min(), 1 / evoked.info['sfreq']
+    stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep)
+
+    sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info,
+                                                noise_cov, snr=20,
+                                                random_state=rng)
+
+    return sim_evoked, stc
+
+
+def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
+    src = fwd['src']
+    pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
+                                     stc.vertices[0])]
+    pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
+                                     stc.vertices[1])[0] +
+                            len(src[0]['vertno'])]
+
+    # Check the position of the two dipoles
+    assert_true(dipoles[0].pos[0] in np.array([pos1, pos2]))
+    assert_true(dipoles[1].pos[0] in np.array([pos1, pos2]))
+
+    ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
+                                     stc.vertices[0])[0]][0]
+    ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
+                                     stc.vertices[1])[0] +
+                            len(src[0]['vertno'])][0]
+
+    # Check the orientation of the dipoles
+    assert_true(np.max(np.abs(np.dot(dipoles[0].ori[0],
+                                     np.array([ori1, ori2]).T))) > 0.99)
+
+    assert_true(np.max(np.abs(np.dot(dipoles[1].ori[0],
+                                     np.array([ori1, ori2]).T))) > 0.99)
+
+    if residual is not None:
+        picks_grad = mne.pick_types(residual.info, meg='grad')
+        picks_mag = mne.pick_types(residual.info, meg='mag')
+        rel_tol = 0.02
+        for picks in [picks_grad, picks_mag]:
+            assert_true(linalg.norm(residual.data[picks], ord='fro') <
+                        rel_tol *
+                        linalg.norm(evoked.data[picks], ord='fro'))
+
+
+@testing.requires_testing_data
+def test_rap_music_simulated():
+    """Test RAP-MUSIC with simulated evoked
+    """
+    evoked, noise_cov, forward, forward_surf_ori, forward_fixed =\
+        _get_data()
+
+    n_dipoles = 2
+    sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
+                                n_dipoles, evoked.times)
+    # Check dipoles for fixed ori
+    dipoles = rap_music(sim_evoked, forward_fixed, noise_cov,
+                        n_dipoles=n_dipoles)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked)
+
+    dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+    # Check dipoles for free ori
+    dipoles, residual = rap_music(sim_evoked, forward, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+    # Check dipoles for free surface ori
+    dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov,
+                                  n_dipoles=n_dipoles, return_residual=True)
+    _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
+
+run_tests_if_main()
diff --git a/mne/bem.py b/mne/bem.py
new file mode 100644
index 0000000..2e83e22
--- /dev/null
+++ b/mne/bem.py
@@ -0,0 +1,1660 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Lorenzo De Santis <lorenzo.de-santis at u-psud.fr>
+#
+# License: BSD (3-clause)
+
+import sys
+import os
+import os.path as op
+import shutil
+import glob
+import numpy as np
+from scipy import linalg
+
+from .fixes import partial
+from .utils import (verbose, logger, run_subprocess, deprecated,
+                    get_subjects_dir)
+from .transforms import _ensure_trans, apply_trans
+from .io.constants import FIFF
+from .io.write import (start_file, start_block, write_float, write_int,
+                       write_float_matrix, write_int_matrix, end_block,
+                       end_file)
+from .io.tag import find_tag
+from .io.tree import dir_tree_find
+from .io.open import fiff_open
+from .externals.six import string_types
+
+
+# ############################################################################
+# Compute BEM solution
+
+
+# The following approach is based on:
+#
+# de Munck JC: "A linear discretization of the volume conductor boundary
+# integral equation using analytically integrated elements",
+# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
+#
+
+
+class ConductorModel(dict):
+    """BEM or sphere model"""
+    def __repr__(self):
+        if self['is_sphere']:
+            center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])
+            pl = '' if len(self['layers']) == 1 else 's'
+            rad = self.radius
+            if rad is None:  # no radius / MEG only
+                extra = 'Sphere (no layers): r0=[%s] mm' % center
+            else:
+                extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'
+                         % (len(self['layers']) - 1, pl, center, rad * 1000.))
+        else:
+            pl = '' if len(self['surfs']) == 1 else 's'
+            extra = ('BEM (%s layer%s)' % (len(self['surfs']), pl))
+        return '<ConductorModel  |  %s>' % extra
+
+    @property
+    def radius(self):
+        if not self['is_sphere']:
+            raise RuntimeError('radius undefined for BEM')
+        return None if len(self['layers']) == 0 else self['layers'][-1]['rad']
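For instance, a sphere model produced by ``make_sphere_model`` prints
through this repr (exact numbers depend on the model):

    import mne

    sphere = mne.make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09)
    print(sphere)  # <ConductorModel  |  Sphere (...)>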
+
+
+def _calc_beta(rk, rk_norm, rk1, rk1_norm):
+    """These coefficients are used to calculate the magic vector omega"""
+    rkk1 = rk1[0] - rk[0]
+    size = np.sqrt(np.dot(rkk1, rkk1))
+    rkk1 /= size
+    num = rk_norm + np.dot(rk, rkk1)
+    den = rk1_norm + np.dot(rk1, rkk1)
+    res = np.log(num / den) / size
+    return res
+
+
+def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
+    """The linear potential matrix element computations"""
+    from .source_space import _fast_cross_nd_sum
+    omega = np.zeros((len(fros), 3))
+
+    # we replicate a little bit of the _get_solids code here for speed
+    v1 = tri_rr[np.newaxis, 0, :] - fros
+    v2 = tri_rr[np.newaxis, 1, :] - fros
+    v3 = tri_rr[np.newaxis, 2, :] - fros
+    triples = _fast_cross_nd_sum(v1, v2, v3)
+    l1 = np.sqrt(np.sum(v1 * v1, axis=1))
+    l2 = np.sqrt(np.sum(v2 * v2, axis=1))
+    l3 = np.sqrt(np.sum(v3 * v3, axis=1))
+    ss = (l1 * l2 * l3 +
+          np.sum(v1 * v2, axis=1) * l3 +
+          np.sum(v1 * v3, axis=1) * l2 +
+          np.sum(v2 * v3, axis=1) * l1)
+    solids = np.arctan2(triples, ss)
+
+    # We *could* subselect the good points from v1, v2, v3, triples, solids,
+    # l1, l2, and l3, but there are *very* few bad points. So instead we do
+    # some unnecessary calculations, and then omit them from the final
+    # solution. These three lines ensure we don't get invalid values in
+    # _calc_beta.
+    bad_mask = np.abs(solids) < np.pi / 1e6
+    l1[bad_mask] = 1.
+    l2[bad_mask] = 1.
+    l3[bad_mask] = 1.
+
+    # Calculate the magic vector vec_omega
+    beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
+            _calc_beta(v2, l2, v3, l3)[:, np.newaxis],
+            _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
+    vec_omega = (beta[2] - beta[0]) * v1
+    vec_omega += (beta[0] - beta[1]) * v2
+    vec_omega += (beta[1] - beta[2]) * v3
+
+    area2 = 2.0 * tri_area
+    n2 = 1.0 / (area2 * area2)
+    # leave omega = 0 otherwise
+    # Put it all together...
+    yys = [v1, v2, v3]
+    idx = [0, 1, 2, 0, 2]
+    for k in range(3):
+        diff = yys[idx[k - 1]] - yys[idx[k + 1]]
+        zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
+        omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
+                             triples * (diff * vec_omega).sum(axis=-1))
+    # omit the bad points from the solution
+    omega[bad_mask] = 0.
+    return omega
+
+
+def _correct_auto_elements(surf, mat):
+    """Improve auto-element approximation..."""
+    pi2 = 2.0 * np.pi
+    tris_flat = surf['tris'].ravel()
+    misses = pi2 - mat.sum(axis=1)
+    for j, miss in enumerate(misses):
+        # How much is missing?
+        n_memb = len(surf['neighbor_tri'][j])
+        # The node itself receives one half
+        mat[j, j] = miss / 2.0
+        # The rest is divided evenly among the member nodes...
+        miss /= (4.0 * n_memb)
+        members = np.where(j == tris_flat)[0]
+        mods = members % 3
+        offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
+        tri_1 = members + offsets[mods, 0]
+        tri_2 = members + offsets[mods, 1]
+        for t1, t2 in zip(tri_1, tri_2):
+            mat[j, tris_flat[t1]] += miss
+            mat[j, tris_flat[t2]] += miss
+    return
+
+
+def _fwd_bem_lin_pot_coeff(surfs):
+    """Calculate the coefficients for linear collocation approach"""
+    # taken from fwd_bem_linear_collocation.c
+    nps = [surf['np'] for surf in surfs]
+    np_tot = sum(nps)
+    coeff = np.zeros((np_tot, np_tot))
+    offsets = np.cumsum(np.concatenate(([0], nps)))
+    for si_1, surf1 in enumerate(surfs):
+        rr_ord = np.arange(nps[si_1])
+        for si_2, surf2 in enumerate(surfs):
+            logger.info("        %s (%d) -> %s (%d) ..." %
+                        (_bem_explain_surface(surf1['id']), nps[si_1],
+                         _bem_explain_surface(surf2['id']), nps[si_2]))
+            tri_rr = surf2['rr'][surf2['tris']]
+            tri_nn = surf2['tri_nn']
+            tri_area = surf2['tri_area']
+            submat = coeff[offsets[si_1]:offsets[si_1 + 1],
+                           offsets[si_2]:offsets[si_2 + 1]]  # view
+            for k in range(surf2['ntri']):
+                tri = surf2['tris'][k]
+                if si_1 == si_2:
+                    skip_idx = ((rr_ord == tri[0]) |
+                                (rr_ord == tri[1]) |
+                                (rr_ord == tri[2]))
+                else:
+                    skip_idx = list()
+                # No contribution from a triangle that
+                # this vertex belongs to
+                # if sidx1 == sidx2 and (tri == j).any():
+                #     continue
+                # Otherwise do the hard job
+                coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
+                                        tri_area[k])
+                coeffs[skip_idx] = 0.
+                submat[:, tri] -= coeffs
+            if si_1 == si_2:
+                _correct_auto_elements(surf1, submat)
+    return coeff
+
+
+def _fwd_bem_multi_solution(solids, gamma, nps):
+    """Do multi surface solution
+
+      * Invert I - solids/(2*M_PI)
+      * Take deflation into account
+      * The matrix is destroyed after inversion
+      * This is the general multilayer case
+
+    """
+    pi2 = 1.0 / (2 * np.pi)
+    n_tot = np.sum(nps)
+    assert solids.shape == (n_tot, n_tot)
+    nsurf = len(nps)
+    defl = 1.0 / n_tot
+    # Modify the matrix
+    offsets = np.cumsum(np.concatenate(([0], nps)))
+    for si_1 in range(nsurf):
+        for si_2 in range(nsurf):
+            mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
+            slice_j = slice(offsets[si_1], offsets[si_1 + 1])
+            slice_k = slice(offsets[si_2], offsets[si_2 + 1])
+            solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
+    solids += np.eye(n_tot)
+    return linalg.inv(solids, overwrite_a=True)
+
+
+def _fwd_bem_homog_solution(solids, nps):
+    """Helper to make a homogeneous solution"""
+    return _fwd_bem_multi_solution(solids, None, nps)
+
+
+def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
+    """Modify the solution according to the IP approach"""
+    n_last = n_tri[-1]
+    mult = (1.0 + ip_mult) / ip_mult
+
+    logger.info('        Combining...')
+    offsets = np.cumsum(np.concatenate(([0], n_tri)))
+    for si in range(len(n_tri)):
+        # Pick the correct submatrix (right column) and multiply
+        sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
+        # Multiply
+        sub -= 2 * np.dot(sub, ip_solution)
+
+    # The lower right corner is a special case
+    sub[-n_last:, -n_last:] += mult * ip_solution
+
+    # Final scaling
+    logger.info('        Scaling...')
+    solution *= ip_mult
+    return
+
+
+def _fwd_bem_linear_collocation_solution(m):
+    """Compute the linear collocation potential solution"""
+    # first, add surface geometries
+    from .surface import _complete_surface_info
+    for surf in m['surfs']:
+        _complete_surface_info(surf, verbose=False)
+
+    logger.info('Computing the linear collocation solution...')
+    logger.info('    Matrix coefficients...')
+    coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
+    m['nsol'] = len(coeff)
+    logger.info("    Inverting the coefficient matrix...")
+    nps = [surf['np'] for surf in m['surfs']]
+    m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
+    if len(m['surfs']) == 3:
+        ip_mult = m['sigma'][1] / m['sigma'][2]
+        if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
+            logger.info('IP approach required...')
+            logger.info('    Matrix coefficients (homog)...')
+            coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
+            logger.info('    Inverting the coefficient matrix (homog)...')
+            ip_solution = _fwd_bem_homog_solution(coeff,
+                                                  [m['surfs'][-1]['np']])
+            logger.info('    Modify the original solution to incorporate '
+                        'IP approach...')
+            _fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
+                                        nps)
+    m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
+    logger.info("Solution ready.")
+
+
+@verbose
+def make_bem_solution(surfs, verbose=None):
+    """Create a BEM solution using the linear collocation approach
+
+    Parameters
+    ----------
+    surfs : list of dict | str
+        The BEM surfaces to use (from `make_bem_model`), or a path to a
+        file containing them.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    bem : instance of ConductorModel
+        The BEM solution.
+
+    See Also
+    --------
+    make_bem_model
+    read_bem_surfaces
+    write_bem_surfaces
+    read_bem_solution
+    write_bem_solution
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    logger.info('Approximation method : Linear collocation\n')
+    if isinstance(surfs, string_types):
+        # Load the surfaces
+        logger.info('Loading surfaces...')
+        surfs = read_bem_surfaces(surfs)
+    bem = ConductorModel(is_sphere=False, surfs=surfs)
+    _add_gamma_multipliers(bem)
+    if len(bem['surfs']) == 3:
+        logger.info('Three-layer model surfaces loaded.')
+    elif len(bem['surfs']) == 1:
+        logger.info('Homogeneous model surface loaded.')
+    else:
+        raise RuntimeError('Only 1- or 3-layer BEM computations supported')
+    _fwd_bem_linear_collocation_solution(bem)
+    logger.info('BEM geometry computations complete.')
+    return bem
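Together with ``make_bem_model`` defined below, a typical workflow would
look as follows; the subject name, subjects directory, and output file name
are assumptions, and passing ``conductivity=[0.3]`` instead would give a
single-layer model:

    import mne

    surfs = mne.make_bem_model('sample', ico=4,
                               conductivity=(0.3, 0.006, 0.3),
                               subjects_dir='/path/to/subjects')
    bem = mne.make_bem_solution(surfs)
    mne.write_bem_solution('sample-bem-sol.fif', bem)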
+
+
+# ############################################################################
+# Make BEM model
+
+def _ico_downsample(surf, dest_grade):
+    """Downsample the surface if isomorphic to a subdivided icosahedron"""
+    from .surface import _get_ico_surface
+    n_tri = surf['ntri']
+    found = -1
+    bad_msg = ("A surface with %d triangles cannot be isomorphic with a "
+               "subdivided icosahedron." % surf['ntri'])
+    if n_tri % 20 != 0:
+        raise RuntimeError(bad_msg)
+    n_tri = n_tri // 20
+    found = int(round(np.log(n_tri) / np.log(4)))
+    if n_tri != 4 ** found:
+        raise RuntimeError(bad_msg)
+    del n_tri
+
+    if dest_grade > found:
+        raise RuntimeError('For this surface, decimation grade should be %d '
+                           'or less, not %s.' % (found, dest_grade))
+
+    source = _get_ico_surface(found)
+    dest = _get_ico_surface(dest_grade, patch_stats=True)
+    del dest['tri_cent']
+    del dest['tri_nn']
+    del dest['neighbor_tri']
+    del dest['tri_area']
+    if not np.array_equal(source['tris'], surf['tris']):
+        raise RuntimeError('The source surface has a matching number of '
+                           'triangles but ordering is wrong')
+    logger.info('Going from %dth to %dth subdivision of an icosahedron '
+                '(n_tri: %d -> %d)' % (found, dest_grade, surf['ntri'],
+                                       dest['ntri']))
+    # Find the mapping
+    dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
+    return dest
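The isomorphism test hinges on a subdivided icosahedron having
20 * 4**grade triangles, so the grade can be recovered from the triangle
count:

    import numpy as np

    n_tri = 5120  # e.g. an ico-4 surface
    grade = int(round(np.log(n_tri // 20) / np.log(4)))
    print(grade, 20 * 4 ** grade == n_tri)  # 4 True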
+
+
+def _get_ico_map(fro, to):
+    """Helper to get a mapping between ico surfaces"""
+    from .surface import _compute_nearest
+    nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
+    n_bads = (dists > 5e-3).sum()
+    if n_bads > 0:
+        raise RuntimeError('No matching vertex for %d destination vertices'
+                           % (n_bads))
+    return nearest
+
+
+def _order_surfaces(surfs):
+    """Reorder the surfaces"""
+    if len(surfs) != 3:
+        return surfs
+    # we have three surfaces
+    surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                  FIFF.FIFFV_BEM_SURF_ID_SKULL,
+                  FIFF.FIFFV_BEM_SURF_ID_BRAIN]
+    ids = np.array([surf['id'] for surf in surfs])
+    if set(ids) != set(surf_order):
+        raise RuntimeError('bad surface ids: %s' % ids)
+    order = [np.where(ids == id_)[0][0] for id_ in surf_order]
+    surfs = [surfs[idx] for idx in order]
+    return surfs
+
+
+def _assert_complete_surface(surf):
+    """Check the sum of solid angles as seen from inside"""
+    # from surface_checks.c
+    from .source_space import _get_solids
+    tot_angle = 0.
+    # Center of mass....
+    cm = surf['rr'].mean(axis=0)
+    logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
+                (_surf_name[surf['id']],
+                 1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
+    tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
+    if np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5:
+        raise RuntimeError('Surface %s is not complete (sum of solid angles '
+                           '= %g * 4*PI instead).' %
+                           (_surf_name[surf['id']], tot_angle))
+
+
+_surf_name = {
+    FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
+    FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
+    FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
+    FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown    ',
+}
+
+
+def _assert_inside(fro, to):
+    """Helper to check one set of points is inside a surface"""
+    # this is "is_inside" in surface_checks.c
+    from .source_space import _get_solids
+    tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
+    if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
+        raise RuntimeError('Surface %s is not completely inside surface %s'
+                           % (_surf_name[fro['id']], _surf_name[to['id']]))
+
+
+def _check_surfaces(surfs):
+    """Check that the surfaces are complete and non-intersecting"""
+    for surf in surfs:
+        _assert_complete_surface(surf)
+    # Then check the topology
+    for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+        logger.info('Checking that %s surface is inside %s surface...' %
+                    (_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
+        _assert_inside(surf_2, surf_1)
+
+
+def _check_surface_size(surf):
+    """Check that the coordinate limits are reasonable"""
+    sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
+    if (sizes < 0.05).any():
+        raise RuntimeError('Dimensions of the surface %s seem too small '
+                           '(%9.5f mm). Maybe the unit of measure is '
+                           'meters instead of mm.' %
+                           (_surf_name[surf['id']], 1000 * sizes.min()))
+
+
+def _check_thicknesses(surfs):
+    """How close are we?"""
+    from .surface import _compute_nearest
+    for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+        min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
+                                    return_dists=True)[1]
+        min_dist = min_dist.min()
+        logger.info('Checking distance between %s and %s surfaces...' %
+                    (_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
+        logger.info('Minimum distance between the %s and %s surfaces is '
+                    'approximately %6.1f mm' %
+                    (_surf_name[surf_1['id']], _surf_name[surf_2['id']],
+                     1000 * min_dist))
+
+
+def _surfaces_to_bem(fname_surfs, ids, sigmas, ico=None):
+    """Convert surfaces to a BEM
+    """
+    from .surface import _read_surface_geom
+    # equivalent of mne_surf2bem
+    surfs = list()
+    assert len(fname_surfs) in (1, 3)
+    for fname in fname_surfs:
+        surfs.append(_read_surface_geom(fname, patch_stats=False,
+                                        verbose=False))
+        surfs[-1]['rr'] /= 1000.
+    # Downsampling if the surface is isomorphic with a subdivided icosahedron
+    if ico is not None:
+        for si, surf in enumerate(surfs):
+            surfs[si] = _ico_downsample(surf, ico)
+    for surf, id_ in zip(surfs, ids):
+        surf['id'] = id_
+
+    # Shifting surfaces is not implemented here
+
+    # Order the surfaces for the benefit of the topology checks
+    for surf, sigma in zip(surfs, sigmas):
+        surf['sigma'] = sigma
+    surfs = _order_surfaces(surfs)
+
+    # Check topology as best we can
+    _check_surfaces(surfs)
+    for surf in surfs:
+        _check_surface_size(surf)
+    _check_thicknesses(surfs)
+    logger.info('Surfaces passed the basic topology checks.')
+    return surfs
+
+
+ at verbose
+def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
+                   subjects_dir=None, verbose=None):
+    """Create a BEM model for a subject
+
+    .. note:: To get a single-layer BEM corresponding to the --homog flag in
+              the command-line tool, set the ``conductivity`` parameter
+              accordingly (e.g. ``conductivity=[0.3]``).
+
+    Parameters
+    ----------
+    subject : str
+        The subject.
+    ico : int | None
+        The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.
+        If None, no subsampling is applied.
+    conductivity : array of float, shape (3,) or (1,)
+        The conductivities to use for each shell. Should be a single element
+        for a one-layer model, or three elements for a three-layer model.
+        Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
+        single-layer model would be ``[0.3]``.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surfaces : list of dict
+        The BEM surfaces. Use `make_bem_solution` to turn these into a
+        `ConductorModel` suitable for forward calculation.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+
+    See Also
+    --------
+    make_bem_solution
+    make_sphere_model
+    read_bem_surfaces
+    write_bem_surfaces
+    """
+    conductivity = np.array(conductivity, float)
+    if conductivity.ndim != 1 or conductivity.size not in (1, 3):
+        raise ValueError('conductivity must be 1D array-like with 1 or 3 '
+                         'elements')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = op.join(subjects_dir, subject)
+    bem_dir = op.join(subject_dir, 'bem')
+    inner_skull = op.join(bem_dir, 'inner_skull.surf')
+    outer_skull = op.join(bem_dir, 'outer_skull.surf')
+    outer_skin = op.join(bem_dir, 'outer_skin.surf')
+    surfaces = [inner_skull, outer_skull, outer_skin]
+    ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+           FIFF.FIFFV_BEM_SURF_ID_SKULL,
+           FIFF.FIFFV_BEM_SURF_ID_HEAD]
+    logger.info('Creating the BEM geometry...')
+    if len(conductivity) == 1:
+        surfaces = surfaces[:1]
+        ids = ids[:1]
+    surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
+    logger.info('Complete.\n')
+    return surfaces
+
+
+# ############################################################################
+# Compute EEG sphere model
+
+def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
+    """Get the model depended weighting factor for n"""
+    nlayer = len(m['layers'])
+    if nlayer in (0, 1):
+        return 1.
+
+    # Initialize the arrays
+    c1 = np.zeros(nlayer - 1)
+    c2 = np.zeros(nlayer - 1)
+    cr = np.zeros(nlayer - 1)
+    cr_mult = np.zeros(nlayer - 1)
+    for k in range(nlayer - 1):
+        c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']
+        c2[k] = c1[k] - 1.0
+        cr_mult[k] = m['layers'][k]['rel_rad']
+        cr[k] = cr_mult[k]
+        cr_mult[k] *= cr_mult[k]
+
+    coeffs = np.zeros(n_terms - 1)
+    for n in range(1, n_terms):
+        # Increment the radius coefficients
+        for k in range(nlayer - 1):
+            cr[k] *= cr_mult[k]
+
+        # Multiply the matrices
+        M = np.eye(2)
+        n1 = n + 1.0
+        for k in range(nlayer - 2, -1, -1):
+            M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],
+                        [n * c2[k] * cr[k], n1 + n * c1[k]]], M)
+        num = n * (2.0 * n + 1.0) ** (nlayer - 1)
+        coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])
+    return coeffs
+
+
+def _compose_linear_fitting_data(mu, u):
+    # y is the data to be fitted (nterms-1 x 1)
+    # M is the model matrix      (nterms-1 x nfit-1)
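+    # Editorial note: the linear system fits the true expansion coefficients
+    # with fn[n] ~ sum_j lambda_j * mu_j ** n; row k covers term k + 1 and
+    # column p the candidate mu[p + 1], with mu[0] eliminated through the
+    # constraint sum(lambda) == fn[0] (see _compute_linear_parameters).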
+    for k in range(u['nterms'] - 1):
+        k1 = k + 1
+        mu1n = np.power(mu[0], k1)
+        u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])
+        for p in range(u['nfit'] - 1):
+            u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)
+
+
+def _compute_linear_parameters(mu, u):
+    """Compute the best-fitting linear parameters"""
+    _compose_linear_fitting_data(mu, u)
+    uu, sing, vv = linalg.svd(u['M'], full_matrices=False)
+
+    # Compute the residuals
+    u['resi'] = u['y'].copy()
+
+    vec = np.empty(u['nfit'] - 1)
+    for p in range(u['nfit'] - 1):
+        vec[p] = np.dot(uu[:, p], u['y'])
+        for k in range(u['nterms'] - 1):
+            u['resi'][k] -= uu[k, p] * vec[p]
+        vec[p] = vec[p] / sing[p]
+
+    lambda_ = np.zeros(u['nfit'])
+    for p in range(u['nfit'] - 1):
+        sum_ = 0.
+        for q in range(u['nfit'] - 1):
+            sum_ += vv[q, p] * vec[q]
+        lambda_[p + 1] = sum_
+    lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
+    rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])
+    return rv, lambda_
+
+
+def _one_step(mu, u):
+    """Evaluate the residual sum of squares fit for one set of mu values"""
+    if np.abs(mu).max() > 1.0:
+        return 1.0
+
+    # Compose the data for the linear fitting, compute SVD, then residuals
+    _compose_linear_fitting_data(mu, u)
+    u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'], full_matrices=False)
+    u['resi'][:] = u['y'][:]
+    for p in range(u['nfit'] - 1):
+        # project onto the left singular vectors (columns of uu), matching
+        # _compute_linear_parameters above
+        dot = np.dot(u['uu'][:, p], u['y'])
+        for k in range(u['nterms'] - 1):
+            u['resi'][k] = u['resi'][k] - u['uu'][k, p] * dot
+
+    # Return their sum of squares
+    return np.dot(u['resi'], u['resi'])
+
+
+def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
+    """Fit the Berg-Scherg equivalent spherical model dipole parameters"""
+    from scipy.optimize import fmin_cobyla
+    assert nfit >= 2
+    u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),
+             nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))
+
+    # (1) Calculate the coefficients of the true expansion
+    u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)
+
+    # (2) Calculate the weighting
+    f = (min([layer['rad'] for layer in m['layers']]) /
+         max([layer['rad'] for layer in m['layers']]))
+
+    # correct weighting
+    k = np.arange(1, nterms + 1)
+    u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /
+                     k) * np.power(f, (k - 1.0))
+    u['w'][-1] = 0
+
+    # (3) Do the nonlinear minimization, constraining mu to [-1, +1]
+    mu_0 = np.random.RandomState(0).rand(nfit) * f
+    fun = partial(_one_step, u=u)
+    max_ = 1. - 2e-4  # adjust for fmin_cobyla "catol" (not in all scipy)
+    # bind ii by value in each lambda to avoid the late-binding closure bug
+    cons = [(lambda x, ii=ii: max_ - np.abs(x[ii])) for ii in range(nfit)]
+    mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)
+
+    # (4) Do the final step: calculation of the linear parameters
+    rv, lambda_ = _compute_linear_parameters(mu, u)
+    order = np.argsort(mu)[::-1]
+    mu, lambda_ = mu[order], lambda_[order]  # sort: largest mu first
+
+    m['mu'] = mu
+    # This division takes into account the actual conductivities
+    m['lambda'] = lambda_ / m['layers'][-1]['sigma']
+    m['nfit'] = nfit
+    return rv
+
+
+ at verbose
+def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
+                      relative_radii=(0.90, 0.92, 0.97, 1.0),
+                      sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):
+    """Create a spherical model for forward solution calculation
+
+    Parameters
+    ----------
+    r0 : array-like | str
+        Head center to use (in head coordinates). If 'auto', the head
+        center will be calculated from the digitization points in info.
+    head_radius : float | str | None
+        If float, compute spherical shells for EEG using the given radius.
+        If 'auto', estimate an appropriate radius from the dig points in Info.
+        If None, exclude shells.
+    info : instance of mne.io.meas_info.Info | None
+        Measurement info. Only needed if ``r0`` or ``head_radius`` are
+        ``'auto'``.
+    relative_radii : array-like
+        Relative radii for the spherical shells.
+    sigmas : array-like
+        Sigma values for the spherical shells.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    sphere : instance of ConductorModel
+        The resulting spherical conductor model.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    make_bem_model
+    make_bem_solution
+    """
+    for name in ('r0', 'head_radius'):
+        param = locals()[name]
+        if isinstance(param, string_types):
+            if param != 'auto':
+                raise ValueError('%s, if str, must be "auto" not "%s"'
+                                 % (name, param))
+
+    if (isinstance(r0, string_types) and r0 == 'auto') or \
+       (isinstance(head_radius, string_types) and head_radius == 'auto'):
+        if info is None:
+            raise ValueError('Info must not be None for auto mode')
+        head_radius_fit, r0_fit = fit_sphere_to_headshape(info)[:2]
+        if isinstance(r0, string_types):
+            r0 = r0_fit / 1000.
+        if isinstance(head_radius, string_types):
+            head_radius = head_radius_fit / 1000.
+    sphere = ConductorModel(is_sphere=True, r0=np.array(r0),
+                            coord_frame=FIFF.FIFFV_COORD_HEAD)
+    sphere['layers'] = list()
+    if head_radius is not None:
+        relative_radii = np.array(relative_radii, float)
+        sigmas = np.array(sigmas, float)
+        # sort the layers by (relative) radius
+        order = np.argsort(relative_radii)
+        relative_radii = relative_radii[order]
+        sigmas = sigmas[order]
+        for rel_rad, sig in zip(relative_radii, sigmas):
+            layer = dict(rad=rel_rad, rel_rad=rel_rad, sigma=sig)
+            sphere['layers'].append(layer)
+
+        # normalize so that the outermost layer has rad == rel_rad == 1
+        R = sphere['layers'][-1]['rad']
+        rR = sphere['layers'][-1]['rel_rad']
+        for layer in sphere['layers']:
+            layer['rad'] /= R
+            layer['rel_rad'] /= rR
+
+        #
+        # Setup the EEG sphere model calculations
+        #
+
+        # Scale the relative radii
+        for k in range(len(relative_radii)):
+            sphere['layers'][k]['rad'] = (head_radius *
+                                          sphere['layers'][k]['rel_rad'])
+        rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
+        logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
+        for k in range(3):
+            logger.info('mu%d = %g    lambda%d = %g'
+                        % (k + 1, sphere['mu'][k], k + 1,
+                           sphere['layers'][-1]['sigma'] *
+                           sphere['lambda'][k]))
+        logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
+                    % (1000 * head_radius,))
+    return ConductorModel(sphere)
+
+
+# #############################################################################
+# Helpers
+
+ at verbose
+def fit_sphere_to_headshape(info, dig_kinds=(FIFF.FIFFV_POINT_EXTRA,),
+                            verbose=None):
+    """Fit a sphere to the headshape points to determine head center
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    dig_kinds : tuple of int
+        Kind of digitization points to use in the fitting. These can be
+        any kind defined in io.constants.FIFF::
+
+            FIFFV_POINT_CARDINAL
+            FIFFV_POINT_HPI
+            FIFFV_POINT_EEG
+            FIFFV_POINT_EXTRA
+
+        Defaults to (FIFFV_POINT_EXTRA,).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    radius : float
+        Sphere radius in mm.
+    origin_head : ndarray, shape (3,)
+        Head center in head coordinates (mm).
+    origin_device : ndarray, shape (3,)
+        Head center in device coordinates (mm).
+    """
+    # get head digitization points of the specified kind
+    hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
+    if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):
+        raise RuntimeError('Digitization points not in head coordinates, '
+                           'contact mne-python developers')
+
+    # exclude some frontal points (nose etc.)
+    hsp = [p for p in hsp if not (p[2] < 0 and p[1] > 0)]
+
+    if len(hsp) == 0:
+        raise ValueError('No head digitization points of the specified '
+                         'kinds (%s) found.' % dig_kinds)
+
+    radius, origin_head = _fit_sphere(np.array(hsp), disp=False)
+    # compute origin in device coordinates
+    head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')
+    origin_device = apply_trans(head_to_dev, origin_head)
+    radius *= 1e3
+    origin_head *= 1e3
+    origin_device *= 1e3
+
+    logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm' % radius)
+    logger.info('Origin head coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(origin_head))
+    logger.info('Origin device coordinates:'.ljust(30) +
+                '%0.1f %0.1f %0.1f mm' % tuple(origin_device))
+
+    return radius, origin_head, origin_device
+
+
+def _fit_sphere(points, disp='auto'):
+    """Aux function to fit a sphere to an arbitrary set of points"""
+    from scipy.optimize import fmin_cobyla
+    if isinstance(disp, string_types) and disp == 'auto':
+        disp = logger.level <= 20  # verbose when logging level is INFO
+    # initial guess for center and radius
+    # half-extents of the bounding box along each coordinate axis
+    radii = (np.max(points, axis=0) - np.min(points, axis=0)) / 2.
+    radius_init = radii.mean()
+    center_init = np.median(points, axis=0)
+
+    # optimization
+    x0 = np.concatenate([center_init, [radius_init]])
+
+    def cost_fun(center_rad):
+        d = points - center_rad[:3]
+        d = (np.sqrt(np.sum(d * d, axis=1)) - center_rad[3])
+        return np.sum(d * d)
+
+    def constraint(center_rad):
+        return center_rad[3]  # radius must be >= 0
+
+    x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,
+                        rhoend=radius_init * 1e-6, disp=disp)
+
+    origin = x_opt[:3]
+    radius = x_opt[3]
+    return radius, origin
+
+
+# ############################################################################
+# Create BEM surfaces
+
+ at verbose
+def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
+                       volume='T1', atlas=False, gcaatlas=False, preflood=None,
+                       verbose=None):
+    """
+    Create BEM surfaces using the watershed algorithm included with FreeSurfer
+
+    Parameters
+    ----------
+    subject : str
+        Subject name (required)
+    subjects_dir : str
+        Directory containing subjects data. If None use
+        the Freesurfer SUBJECTS_DIR environment variable.
+    overwrite : bool
+        Write over existing files.
+    volume : str
+        The FreeSurfer volume to use as input. Defaults to 'T1'.
+    atlas : bool
+        Specify the --atlas option for mri_watershed.
+    gcaatlas : bool
+        Use the subcortical atlas.
+    preflood : int
+        Change the preflood height (the -h option of mri_watershed).
+    verbose : bool, str or None
+        If not None, override default verbose level.
+
+    .. versionadded:: 0.10
+    """
+    from .surface import read_surface
+    env = os.environ.copy()
+
+    if not os.environ.get('FREESURFER_HOME'):
+        raise RuntimeError('FREESURFER_HOME environment variable not set')
+
+    env['SUBJECT'] = subject
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    subject_dir = op.join(subjects_dir, subject)
+    mri_dir = op.join(subject_dir, 'mri')
+    T1_dir = op.join(mri_dir, volume)
+    T1_mgz = op.join(mri_dir, volume + '.mgz')
+    bem_dir = op.join(subject_dir, 'bem')
+    ws_dir = op.join(subject_dir, 'bem', 'watershed')
+
+    if not op.isdir(subject_dir):
+        raise RuntimeError('Could not find the MRI data directory "%s"'
+                           % subject_dir)
+    if not op.isdir(bem_dir):
+        os.makedirs(bem_dir)
+    if not op.isdir(T1_dir) and not op.isfile(T1_mgz):
+        raise RuntimeError('Could not find the MRI data')
+    if op.isdir(ws_dir):
+        if not overwrite:
+            raise RuntimeError('%s already exists. Use the --overwrite option '
+                               'to recreate it.' % ws_dir)
+        else:
+            shutil.rmtree(ws_dir)
+    # put together the command
+    cmd = ['mri_watershed']
+    if preflood:
+        cmd += ['-h', '%s' % int(preflood)]
+
+    if gcaatlas:
+        cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +
+                '/average/RB_all_withskull_2007-08-08.gca',
+                subject_dir + '/mri/transforms/talairach_with_skull.lta']
+    elif atlas:
+        cmd += ['-atlas']
+    if op.exists(T1_mgz):
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,
+                op.join(ws_dir, 'ws')]
+    else:
+        cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,
+                op.join(ws_dir, 'ws')]
+    # report and run
+    logger.info('\nRunning mri_watershed for BEM segmentation with the '
+                'following parameters:\n\n'
+                'SUBJECTS_DIR = %s\n'
+                'SUBJECT = %s\n'
+                'Results dir = %s\n' % (subjects_dir, subject, ws_dir))
+    os.makedirs(op.join(ws_dir, 'ws'))
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    os.chdir(ws_dir)
+    if op.isfile(T1_mgz):
+        # XXX : do this with python code
+        surfaces = [subject + '_brain_surface', subject +
+                    '_inner_skull_surface', subject + '_outer_skull_surface',
+                    subject + '_outer_skin_surface']
+        for s in surfaces:
+            cmd = ['mne_convert_surface', '--surf', s, '--mghmri', T1_mgz,
+                   '--surfout', s, "--replacegeom"]
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    os.chdir(bem_dir)
+    if op.isfile(subject + '-head.fif'):
+        os.remove(subject + '-head.fif')
+
+    # run the equivalent of mne_surf2bem
+    points, tris = read_surface(op.join(ws_dir,
+                                        subject + '_outer_skin_surface'))
+    points *= 1e-3
+    surf = dict(coord_frame=FIFF.FIFFV_COORD_MRI,
+                id=FIFF.FIFFV_BEM_SURF_ID_HEAD, nn=None, np=len(points),
+                ntri=len(tris), rr=points, sigma=1, tris=tris)
+    write_bem_surfaces(subject + '-head.fif', surf)
+
+    logger.info('Created %s/%s-head.fif\n\nComplete.' % (bem_dir, subject))
+
+
+# ############################################################################
+# Read
+
+ at verbose
+def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
+    """Read the BEM surfaces from a FIF file
+
+    Parameters
+    ----------
+    fname : string
+        The name of the file containing the surfaces.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
+    s_id : int | None
+        If int, only read and return the surface with the given s_id.
+        An error will be raised if it doesn't exist. If None, all
+        surfaces are read and returned.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    surf : list | dict
+        A list of dictionaries that each contain a surface. If s_id
+        is not None, only the requested surface will be returned.
+
+    See Also
+    --------
+    write_bem_surfaces, write_bem_solution, make_bem_model
+    """
+    from .surface import _complete_surface_info
+    # Default coordinate frame
+    coord_frame = FIFF.FIFFV_COORD_MRI
+    # Open the file, create directory
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        # Find BEM
+        bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
+        if bem is None or len(bem) == 0:
+            raise ValueError('BEM data not found')
+
+        bem = bem[0]
+        # Locate all surfaces
+        bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
+        if bemsurf is None or len(bemsurf) == 0:
+            raise ValueError('BEM surface data not found')
+
+        logger.info('    %d BEM surfaces found' % len(bemsurf))
+        # Coordinate frame possibly at the top level
+        tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
+        if tag is not None:
+            coord_frame = tag.data
+        # Read all surfaces
+        if s_id is not None:
+            surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
+                    for bsurf in bemsurf]
+            surf = [s for s in surf if s is not None]
+            if len(surf) != 1:
+                raise ValueError('surface with id %d not found' % s_id)
+        else:
+            surf = list()
+            for bsurf in bemsurf:
+                logger.info('    Reading a surface...')
+                this = _read_bem_surface(fid, bsurf, coord_frame)
+                surf.append(this)
+                logger.info('[done]')
+            logger.info('    %d BEM surfaces read' % len(surf))
+        if patch_stats:
+            for this in surf:
+                _complete_surface_info(this)
+    return surf[0] if s_id is not None else surf
+
+
+def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
+    """Read one bem surface
+    """
+    # fid should be open as a context manager here
+    res = dict()
+    # Read all the interesting stuff
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
+
+    if tag is None:
+        res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
+    else:
+        res['id'] = int(tag.data)
+
+    if s_id is not None and res['id'] != s_id:
+        return None
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
+    res['sigma'] = 1.0 if tag is None else float(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
+    if tag is None:
+        raise ValueError('Number of vertices not found')
+
+    res['np'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
+    if tag is None:
+        raise ValueError('Number of triangles not found')
+    res['ntri'] = int(tag.data)
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
+        if tag is None:
+            res['coord_frame'] = def_coord_frame
+        else:
+            res['coord_frame'] = tag.data
+    else:
+        res['coord_frame'] = tag.data
+
+    # Vertices, normals, and triangles
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
+    if tag is None:
+        raise ValueError('Vertex data not found')
+
+    res['rr'] = tag.data.astype(np.float)  # XXX : double because of mayavi bug
+    if res['rr'].shape[0] != res['np']:
+        raise ValueError('Vertex information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+    if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
+    if tag is None:
+        res['nn'] = list()
+    else:
+        res['nn'] = tag.data
+        if res['nn'].shape[0] != res['np']:
+            raise ValueError('Vertex normal information is incorrect')
+
+    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
+    if tag is None:
+        raise ValueError('Triangulation not found')
+
+    res['tris'] = tag.data - 1  # triangle indices start at 0 in Python
+    if res['tris'].shape[0] != res['ntri']:
+        raise ValueError('Triangulation information is incorrect')
+
+    return res
+
+
+ at verbose
+def read_bem_solution(fname, verbose=None):
+    """Read the BEM solution from a file
+
+    Parameters
+    ----------
+    fname : string
+        The file containing the BEM solution.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    bem : instance of ConductorModel
+        The BEM solution.
+
+    See Also
+    --------
+    write_bem_solution, read_bem_surfaces, write_bem_surfaces,
+    make_bem_solution
+    """
+    # mirrors fwd_bem_load_surfaces from fwd_bem_model.c
+    logger.info('Loading surfaces...')
+    bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
+    if len(bem_surfs) == 3:
+        logger.info('Three-layer model surfaces loaded.')
+        needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                           FIFF.FIFFV_BEM_SURF_ID_SKULL,
+                           FIFF.FIFFV_BEM_SURF_ID_BRAIN])
+        if not all(x['id'] in needed for x in bem_surfs):
+            raise RuntimeError('Could not find necessary BEM surfaces')
+        # reorder surfaces as necessary (shouldn't need to?)
+        reorder = [None] * 3
+        for x in bem_surfs:
+            reorder[np.where(x['id'] == needed)[0][0]] = x
+        bem_surfs = reorder
+    elif len(bem_surfs) == 1:
+        if bem_surfs[0]['id'] != FIFF.FIFFV_BEM_SURF_ID_BRAIN:
+            raise RuntimeError('Inner skull (brain) surface not found')
+        logger.info('Homogeneous model surface loaded.')
+
+    # convert from surfaces to solution
+    bem = ConductorModel(is_sphere=False, surfs=bem_surfs)
+    logger.info('\nLoading the solution matrix...\n')
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        # Find the BEM data
+        nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
+        if len(nodes) == 0:
+            raise RuntimeError('No BEM data in %s' % fname)
+        bem_node = nodes[0]
+
+        # Approximation method
+        tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_APPROX)
+        if tag is None:
+            raise RuntimeError('No BEM solution found in %s' % fname)
+        method = tag.data[0]
+        if method not in (FIFF.FIFFV_BEM_APPROX_CONST,
+                          FIFF.FIFFV_BEM_APPROX_LINEAR):
+            raise RuntimeError('Cannot handle BEM approximation method : %d'
+                               % method)
+
+        tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
+        dims = tag.data.shape
+        if len(dims) != 2:
+            raise RuntimeError('Expected a two-dimensional solution matrix '
+                               'instead of a %d-dimensional one' % len(dims))
+
+        dim = 0
+        for surf in bem['surfs']:
+            if method == FIFF.FIFFV_BEM_APPROX_LINEAR:
+                dim += surf['np']
+            else:  # method == FIFF.FIFFV_BEM_APPROX_CONST
+                dim += surf['ntri']
+
+        if dims[0] != dim or dims[1] != dim:
+            raise RuntimeError('Expected a %d x %d solution matrix instead of '
+                               'a %d x %d one' % (dim, dim, dims[0], dims[1]))
+        sol = tag.data
+        nsol = dims[0]
+
+    bem['solution'] = sol
+    bem['nsol'] = nsol
+    bem['bem_method'] = method
+
+    # Gamma factors and multipliers
+    _add_gamma_multipliers(bem)
+    kind = {
+        FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',
+        FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear collocation',
+    }[bem['bem_method']]
+    logger.info('Loaded %s BEM solution from %s', kind, fname)
+    return bem
+
+
+def _add_gamma_multipliers(bem):
+    """Helper to add gamma and multipliers in-place"""
+    bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
+    # Dirty trick for the zero conductivity outside
+    sigma = np.r_[0.0, bem['sigma']]
+    bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
+    bem['field_mult'] = sigma[1:] - sigma[:-1]
+    # make sure subsequent "zip"s work correctly
+    assert len(bem['surfs']) == len(bem['field_mult'])
+    bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
+                    (sigma[1:] + sigma[:-1])[:, np.newaxis])
+
+
+_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+              'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
+              'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
+
+
+def _bem_find_surface(bem, id_):
+    """Find surface from already-loaded BEM"""
+    if isinstance(id_, string_types):
+        name = id_
+        id_ = _surf_dict[id_]
+    else:
+        name = _bem_explain_surface(id_)
+    idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
+    if len(idx) != 1:
+        raise RuntimeError('BEM model does not have the %s triangulation'
+                           % name.replace('_', ' '))
+    return bem['surfs'][idx[0]]
+
+
+def _bem_explain_surface(id_):
+    """Return a string corresponding to the given surface ID"""
+    _rev_dict = dict((val, key) for key, val in _surf_dict.items())
+    return _rev_dict[id_]
+
+
+# ############################################################################
+# Write
+
+ at deprecated('write_bem_surface is deprecated and will be removed in 0.11, '
+            'use write_bem_surfaces instead')
+def write_bem_surface(fname, surf):
+    """Write one bem surface
+
+    Parameters
+    ----------
+    fname : string
+        File to write
+    surf : dict
+        A surface structured as obtained with read_bem_surfaces
+
+    See Also
+    --------
+    read_bem_surfaces
+    """
+    write_bem_surfaces(fname, surf)
+
+
+def write_bem_surfaces(fname, surfs):
+    """Write BEM surfaces to a fiff file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to write.
+    surfs : dict | list of dict
+        The surfaces, or a single surface.
+    """
+    if isinstance(surfs, dict):
+        surfs = [surfs]
+    with start_file(fname) as fid:
+        start_block(fid, FIFF.FIFFB_BEM)
+        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])
+        _write_bem_surfaces_block(fid, surfs)
+        end_block(fid, FIFF.FIFFB_BEM)
+        end_file(fid)
+
+
+def _write_bem_surfaces_block(fid, surfs):
+    """Helper to actually write bem surfaces"""
+    for surf in surfs:
+        start_block(fid, FIFF.FIFFB_BEM_SURF)
+        write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
+        write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
+        write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
+        write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
+        # triangle indices start at 1 in FIF, 0 in Python
+        write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,
+                         surf['tris'] + 1)
+        if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
+            write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])
+        end_block(fid, FIFF.FIFFB_BEM_SURF)
+
+
+def write_bem_solution(fname, bem):
+    """Write a BEM model with solution
+
+    Parameters
+    ----------
+    fname : str
+        The filename to use.
+    bem : instance of ConductorModel
+        The BEM model with solution to save.
+
+    See Also
+    --------
+    read_bem_solution
+    """
+    with start_file(fname) as fid:
+        start_block(fid, FIFF.FIFFB_BEM)
+        # Coordinate frame (mainly for backward compatibility)
+        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,
+                  bem['surfs'][0]['coord_frame'])
+        # Surfaces
+        _write_bem_surfaces_block(fid, bem['surfs'])
+        # The potential solution
+        if 'solution' in bem:
+            if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
+                raise RuntimeError('Only linear collocation supported')
+            write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
+            write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
+                               bem['solution'])
+        end_block(fid, FIFF.FIFFB_BEM)
+        end_file(fid)
+
+
+# #############################################################################
+# Create 3-Layers BEM model from Flash MRI images
+
+def _prepare_env(subject, subjects_dir):
+    """Helper to prepare an env object for subprocess calls"""
+    env = os.environ.copy()
+    if not isinstance(subject, string_types):
+        raise TypeError('The subject argument must be a string')
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    env['SUBJECT'] = subject
+    env['SUBJECTS_DIR'] = subjects_dir
+    mri_dir = op.join(subjects_dir, subject, 'mri')
+    bem_dir = op.join(subjects_dir, subject, 'bem')
+    return env, mri_dir, bem_dir
+
+
+ at verbose
+def convert_flash_mris(subject, flash30=True, convert=True, unwarp=False,
+                       subjects_dir=None, verbose=None):
+    """Convert DICOM files for use with make_flash_bem
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    flash30 : bool
+        Use 30-degree flip angle data.
+    convert : bool
+        Convert the original DICOM images to mgz format. If False, assume
+        the Flash MRI images have already been converted to mgz files.
+    unwarp : bool
+        Run grad_unwarp with -unwarp option on each of the converted
+        data sets. It requires FreeSurfer's MATLAB toolbox to be properly
+        installed.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Before running this script, do the following (unless ``convert=False``
+    is specified):
+
+        1. Copy all of your FLASH images in a single directory <source> and
+           create a directory <dest> to hold the output of mne_organize_dicom
+        2. cd to <dest> and run
+           $ mne_organize_dicom <source>
+           to create an appropriate directory structure
+        3. Create symbolic links to make flash05 and flash30 point to the
+           appropriate series:
+           $ ln -s <FLASH 5 series dir> flash05
+           $ ln -s <FLASH 30 series dir> flash30
+        4. cd to the directory where flash05 and flash30 links are
+        5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately
+        6. Run this script
+
+    This function assumes that the Freesurfer segmentation of the subject
+    has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+    should be, as usual, in the subject's mri directory.
+    """
+    env, mri_dir = _prepare_env(subject, subjects_dir)[:2]
+    # Step 1a : Data conversion to mgz format
+    if not op.exists(op.join(mri_dir, 'flash', 'parameter_maps')):
+        os.makedirs(op.join(mri_dir, 'flash', 'parameter_maps'))
+    echos_done = 0
+    if convert:
+        logger.info("\n---- Converting Flash images ----")
+        echos = ['001', '002', '003', '004', '005', '006', '007', '008']
+        if flash30:
+            flashes = ['05', '30']
+        else:
+            flashes = ['05']
+        # Fall back to an alternative echo numbering if any directory from
+        # the default set is missing
+        missing = False
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    missing = True
+        if missing:
+            echos = ['002', '003', '004', '005', '006', '007', '008', '009']
+            for flash in flashes:
+                for echo in echos:
+                    if not op.isdir(op.join('flash' + flash, echo)):
+                        raise RuntimeError("Directory %s is missing."
+                                           % op.join('flash' + flash, echo))
+        for flash in flashes:
+            for echo in echos:
+                if not op.isdir(op.join('flash' + flash, echo)):
+                    raise RuntimeError("Directory %s is missing."
+                                       % op.join('flash' + flash, echo))
+                sample_file = glob.glob(op.join('flash' + flash, echo, '*'))[0]
+                dest_file = op.join(mri_dir, 'flash',
+                                    'mef' + flash + '_' + echo + '.mgz')
+                # do not redo if already present
+                if op.isfile(dest_file):
+                    logger.info("The file %s is already there")
+                else:
+                    cmd = ['mri_convert', sample_file, dest_file]
+                    run_subprocess(cmd, env=env, stdout=sys.stdout,
+                                   stderr=sys.stderr)
+                    echos_done += 1
+    # Step 1b : Run grad_unwarp on converted files
+    os.chdir(op.join(mri_dir, "flash"))
+    files = glob.glob("mef*.mgz")
+    if unwarp:
+        logger.info("\n---- Unwarp mgz data sets ----")
+        for infile in files:
+            outfile = infile.replace(".mgz", "u.mgz")
+            cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp',
+                   'true']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Clear parameter maps if some of the data were reconverted
+    if echos_done > 0 and op.exists("parameter_maps"):
+        shutil.rmtree("parameter_maps")
+        logger.info("\nParameter maps directory cleared")
+    if not op.exists("parameter_maps"):
+        os.makedirs("parameter_maps")
+    # Step 2 : Create the parameter maps
+    if flash30:
+        logger.info("\n---- Creating the parameter maps ----")
+        if unwarp:
+            files = glob.glob("mef05*u.mgz")
+        if len(os.listdir('parameter_maps')) == 0:
+            cmd = ['mri_ms_fitparms'] + files + ['parameter_maps']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+        else:
+            logger.info("Parameter maps were already computed")
+        # Step 3 : Synthesize the flash 5 images
+        logger.info("\n---- Synthesizing flash 5 images ----")
+        os.chdir('parameter_maps')
+        if not op.exists('flash5.mgz'):
+            cmd = ['mri_synthesize', '20', '5', '5', 'T1.mgz', 'PD.mgz',
+                   'flash5.mgz']
+            run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+            os.remove('flash5_reg.mgz')
+        else:
+            logger.info("Synthesized flash 5 volume is already there")
+    else:
+        logger.info("\n---- Averaging flash5 echoes ----")
+        os.chdir('parameter_maps')
+        if unwarp:
+            files = glob.glob("mef05*u.mgz")
+        else:
+            files = glob.glob("mef05*.mgz")
+        cmd = ['mri_average', '-noconform'] + files + ['flash5.mgz']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+        if op.exists('flash5_reg.mgz'):
+            os.remove('flash5_reg.mgz')
+
+
+ at verbose
+def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None,
+                   verbose=None):
+    """Create 3-Layer BEM model from prepared flash MRI images
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    overwrite : bool
+        Write over existing .surf files in bem folder.
+    show : bool
+        Show surfaces to visually inspect all three BEM surfaces (recommended).
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    This program assumes that FreeSurfer and MNE are installed and
+    sourced properly.
+
+    This function extracts the BEM surfaces (outer skull, inner skull, and
+    outer skin) from multiecho FLASH MRI data with flip angles of 5 and 30
+    degrees, in mgz format.
+
+    This function assumes that the flash images are available in the
+    folder mri/flash within the freesurfer subject reconstruction.
+
+    See Also
+    --------
+    convert_flash_mris
+    """
+    from .viz.misc import plot_bem
+    env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir)
+
+    logger.info('\nProcessing the flash MRI data to produce BEM meshes with '
+                'the following parameters:\n'
+                'SUBJECTS_DIR = %s\n'
+                'SUBJECT = %s\n'
+                'Result dir = %s\n' % (subjects_dir, subject,
+                                       op.join(bem_dir, 'flash')))
+    # Step 4 : Register with MPRAGE
+    logger.info("\n---- Registering flash 5 with MPRAGE ----")
+    if not op.exists('flash5_reg.mgz'):
+        if op.exists(op.join(mri_dir, 'T1.mgz')):
+            ref_volume = op.join(mri_dir, 'T1.mgz')
+        else:
+            ref_volume = op.join(mri_dir, 'T1')
+        cmd = ['fsl_rigid_register', '-r', ref_volume, '-i', 'flash5.mgz',
+               '-o', 'flash5_reg.mgz']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("Registered flash 5 image is already there")
+    # Step 5a : Convert flash5 into COR
+    logger.info("\n---- Converting flash5 volume into COR format ----")
+    shutil.rmtree(op.join(mri_dir, 'flash5'), ignore_errors=True)
+    os.makedirs(op.join(mri_dir, 'flash5'))
+    cmd = ['mri_convert', 'flash5_reg.mgz', op.join(mri_dir, 'flash5')]
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Step 5b and c : Convert the mgz volumes into COR
+    os.chdir(mri_dir)
+    convert_T1 = False
+    if not op.isdir('T1') or len(glob.glob(op.join('T1', 'COR*'))) == 0:
+        convert_T1 = True
+    convert_brain = False
+    if not op.isdir('brain') or len(glob.glob(op.join('brain', 'COR*'))) == 0:
+        convert_brain = True
+    logger.info("\n---- Converting T1 volume into COR format ----")
+    if convert_T1:
+        if not op.isfile('T1.mgz'):
+            raise RuntimeError("Both T1 mgz and T1 COR volumes missing.")
+        os.makedirs('T1')
+        cmd = ['mri_convert', 'T1.mgz', 'T1']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("T1 volume is already in COR format")
+    logger.info("\n---- Converting brain volume into COR format ----")
+    if convert_brain:
+        if not op.isfile('brain.mgz'):
+            raise RuntimeError("Both brain mgz and brain COR volumes missing.")
+        os.makedirs('brain')
+        cmd = ['mri_convert', 'brain.mgz', 'brain']
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    else:
+        logger.info("Brain volume is already in COR format")
+    # Finally ready to go
+    logger.info("\n---- Creating the BEM surfaces ----")
+    cmd = ['mri_make_bem_surfaces', subject]
+    run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    logger.info("\n---- Converting the tri files into surf files ----")
+    os.chdir(bem_dir)
+    if not op.exists('flash'):
+        os.makedirs('flash')
+    os.chdir('flash')
+    surfs = ['inner_skull', 'outer_skull', 'outer_skin']
+    for surf in surfs:
+        shutil.move(op.join(bem_dir, surf + '.tri'), surf + '.tri')
+        cmd = ['mne_convert_surface', '--tri', surf + '.tri', '--surfout',
+               surf + '.surf', '--swap', '--mghmri',
+               op.join(subjects_dir, subject, 'mri', 'flash', 'parameter_maps',
+                       'flash5_reg.mgz')]
+        run_subprocess(cmd, env=env, stdout=sys.stdout, stderr=sys.stderr)
+    # Cleanup section
+    logger.info("\n---- Cleaning up ----")
+    os.chdir(bem_dir)
+    os.remove('inner_skull_tmp.tri')
+    os.chdir(mri_dir)
+    if convert_T1:
+        shutil.rmtree('T1')
+        logger.info("Deleted the T1 COR volume")
+    if convert_brain:
+        shutil.rmtree('brain')
+        logger.info("Deleted the brain COR volume")
+    shutil.rmtree('flash5')
+    logger.info("Deleted the flash5 COR volume")
+    # Create symbolic links to the .surf files in the bem folder
+    logger.info("\n---- Creating symbolic links ----")
+    os.chdir(bem_dir)
+    for surf in surfs:
+        surf = surf + '.surf'
+        if not overwrite and op.exists(surf):
+            skip_symlink = True
+        else:
+            if op.exists(surf):
+                os.remove(surf)
+            os.symlink(op.join('flash', surf), op.join(surf))
+            skip_symlink = False
+    if skip_symlink:
+        logger.info("Unable to create all symbolic links to .surf files "
+                    "in bem folder. Use --overwrite option to recreate them.")
+        dest = op.join(bem_dir, 'flash')
+    else:
+        logger.info("Symbolic links to .surf files created in bem folder")
+        dest = bem_dir
+    logger.info("\nThank you for waiting.\nThe BEM triangulations for this "
+                "subject are now available at:\n%s.\nWe hope the BEM meshes "
+                "created will facilitate your MEG and EEG data analyses."
+                % dest)
+    # Show computed BEM surfaces
+    if show:
+        plot_bem(subject=subject, subjects_dir=subjects_dir,
+                 orientation='coronal', slices=None, show=True)
diff --git a/mne/channels.py b/mne/channels.py
deleted file mode 100644
index d50dee2..0000000
--- a/mne/channels.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#          Denis Engmeann <denis.engemann at gmail.com>
-#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
-#
-# License: BSD (3-clause)
-
-import numpy as np
-from scipy.io import loadmat
-from scipy import sparse
-
-from .externals.six import string_types
-
-from .utils import verbose, logger
-from .io.pick import channel_type, pick_info
-from .io.constants import FIFF
-
-
-def _get_meg_system(info):
-    """Educated guess for the helmet type based on channels"""
-    system = '306m'
-    for ch in info['chs']:
-        if ch['kind'] == FIFF.FIFFV_MEG_CH:
-            coil_type = ch['coil_type'] & 0xFFFF
-            if coil_type == FIFF.FIFFV_COIL_NM_122:
-                system = '122m'
-                break
-            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
-                system = '306m'
-                break
-            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
-                  coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
-                nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
-                               for c in info['chs']])
-                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
-                break
-            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
-                system = 'CTF_275'
-                break
-            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
-                system = 'KIT'
-                break
-            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
-                system = 'BabySQUID'
-                break
-    return system
-
-
-def _contains_ch_type(info, ch_type):
-    """Check whether a certain channel type is in an info object
-
-    Parameters
-    ---------
-    info : instance of mne.io.meas_info.Info
-        The measurement information.
-    ch_type : str
-        the channel type to be checked for
-
-    Returns
-    -------
-    has_ch_type : bool
-        Whether the channel type is present or not.
-    """
-    if not isinstance(ch_type, string_types):
-        raise ValueError('`ch_type` is of class {actual_class}. It must be '
-                         '`str`'.format(actual_class=type(ch_type)))
-
-    valid_channel_types = ('grad mag eeg stim eog emg ecg ref_meg resp '
-                           'exci ias syst misc').split()
-
-    if ch_type not in valid_channel_types:
-        msg = ('The ch_type passed ({passed}) is not valid. '
-               'it must be {valid}')
-        raise ValueError(msg.format(passed=ch_type,
-                                    valid=' or '.join(valid_channel_types)))
-    return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
-
-
- at verbose
-def equalize_channels(candidates, verbose=None):
-    """Equalize channel picks for a collection of MNE-Python objects
-
-    Parameters
-    ----------
-    candidates : list
-        list Raw | Epochs | Evoked.
-    verbose : None | bool
-        whether to be verbose or not.
-
-    Note. This function operates inplace.
-    """
-    from .io.base import _BaseRaw
-    from .epochs import Epochs
-    from .evoked import Evoked
-    from .time_frequency import AverageTFR
-
-    if not all([isinstance(c, (_BaseRaw, Epochs, Evoked, AverageTFR))
-                for c in candidates]):
-        valid = ['Raw', 'Epochs', 'Evoked', 'AverageTFR']
-        raise ValueError('candidates must be ' + ' or '.join(valid))
-
-    chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
-    chan_template = candidates[chan_max_idx].ch_names
-    logger.info('Identiying common channels ...')
-    channels = [set(c.ch_names) for c in candidates]
-    common_channels = set(chan_template).intersection(*channels)
-    dropped = list()
-    for c in candidates:
-        drop_them = list(set(c.ch_names) - common_channels)
-        if drop_them:
-            c.drop_channels(drop_them)
-            dropped.extend(drop_them)
-    if dropped:
-        dropped = list(set(dropped))
-        logger.info('Dropped the following channels:\n%s' % dropped)
-    else:
-        logger.info('all channels are corresponding, nothing to do.')
-
-
-class ContainsMixin(object):
-    """Mixin class for Raw, Evoked, Epochs
-    """
-    def __contains__(self, ch_type):
-        """Check channel type membership"""
-        if ch_type == 'meg':
-            has_ch_type = (_contains_ch_type(self.info, 'mag') or
-                           _contains_ch_type(self.info, 'grad'))
-        else:
-            has_ch_type = _contains_ch_type(self.info, ch_type)
-        return has_ch_type
-
-
-class PickDropChannelsMixin(object):
-    """Mixin class for Raw, Evoked, Epochs
-    """
-    def pick_channels(self, ch_names, copy=False):
-        """Pick some channels
-
-        Parameters
-        ----------
-        ch_names : list
-            The list of channels to select.
-        copy : bool
-            If True, returns new instance. Else, modifies in place. Defaults to
-            False.
-        """
-        inst = self.copy() if copy else self
-
-        idx = [inst.ch_names.index(c) for c in ch_names if c in inst.ch_names]
-        inst._pick_drop_channels(idx)
-
-        return inst
-
-    def drop_channels(self, ch_names, copy=False):
-        """Drop some channels
-
-        Parameters
-        ----------
-        ch_names : list
-            The list of channels to remove.
-        copy : bool
-            If True, returns new instance. Else, modifies in place. Defaults to
-            False.
-        """
-        inst = self.copy() if copy else self
-
-        bad_idx = [inst.ch_names.index(c) for c in ch_names
-                   if c in inst.ch_names]
-        idx = np.setdiff1d(np.arange(len(inst.ch_names)), bad_idx)
-        inst._pick_drop_channels(idx)
-
-        return inst
-
-    def _pick_drop_channels(self, idx):
-        # avoid circular imports
-        from .io.base import _BaseRaw
-        from .epochs import Epochs
-        from .evoked import Evoked
-        from .time_frequency import AverageTFR
-
-        if isinstance(self, _BaseRaw):
-            if not self.preload:
-                raise RuntimeError('Raw data must be preloaded to drop or pick'
-                                   ' channels')
-
-        inst_has = lambda attr: getattr(self, attr, None) is not None
-
-        if inst_has('picks'):
-            self.picks = self.picks[idx]
-
-        if inst_has('cals'):
-            self.cals = self.cals[idx]
-
-        self.info = pick_info(self.info, idx, copy=False)
-
-        if inst_has('_projector'):
-            self._projector = self._projector[idx][:, idx]
-
-        if isinstance(self, _BaseRaw) and inst_has('_data'):
-            self._data = self._data[idx, :]
-        elif isinstance(self, Epochs) and inst_has('_data'):
-            self._data = self._data[:, idx, :]
-        elif isinstance(self, AverageTFR) and inst_has('data'):
-            self.data = self.data[idx, :, :]
-        elif isinstance(self, Evoked):
-            self.data = self.data[idx, :]
-
-
-def rename_channels(info, mapping):
-    """Rename channels and optionally change the sensor type.
-
-    Note: This only changes between the following sensor types: eeg, eog,
-    emg, ecg, and misc. It also cannot change to eeg.
-
-    Parameters
-    ----------
-    info : dict
-        Measurement info.
-    mapping : dict
-        a dictionary mapping the old channel to a new channel name {'EEG061' :
-        'EEG161'}. If changing the sensor type, make the new name a tuple with
-        the name (str) and the new channel type (str)
-        {'EEG061',('EOG061','eog')}.
-    """
-    human2fiff = {'eog': FIFF.FIFFV_EOG_CH,
-                  'emg': FIFF.FIFFV_EMG_CH,
-                  'ecg': FIFF.FIFFV_ECG_CH,
-                  'misc': FIFF.FIFFV_MISC_CH}
-
-    bads, chs = info['bads'], info['chs']
-    ch_names = info['ch_names']
-    new_names, new_kinds, new_bads = list(), list(), list()
-
-    # first check and assemble clean mappings of index and name
-    for ch_name, new_name in mapping.items():
-        if ch_name not in ch_names:
-            raise ValueError("This channel name (%s) doesn't exist in info."
-                             % ch_name)
-
-        c_ind = ch_names.index(ch_name)
-        if not isinstance(new_name, (string_types, tuple)):
-            raise ValueError('Your mapping is not configured properly. '
-                             'Please see the help: mne.rename_channels?')
-
-        elif isinstance(new_name, tuple):  # name and type change
-            new_name, new_type = new_name  # unpack
-            if new_type not in human2fiff:
-                raise ValueError('This function cannot change to this '
-                                 'channel type: %s.' % new_type)
-            new_kinds.append((c_ind, human2fiff[new_type]))
-
-        if new_name in ch_names:
-            raise ValueError('The new name ({new}) already exists. Choose a '
-                             'unique name'.format(new=new_name))
-
-        new_names.append((c_ind, new_name))
-        if ch_name in bads:  # check bads
-            new_bads.append((bads.index(ch_name), new_name))
-
-    # Reset ch_names and Check that all the channel names are unique.
-    for key, collection in [('ch_name', new_names), ('kind', new_kinds)]:
-        for c_ind, new_name in collection:
-            chs[c_ind][key] = new_name
-    for c_ind, new_name in new_bads:
-        bads[c_ind] = new_name
-
-    # reference magic, please don't change (with the local binding
-    # it doesn't work)
-    info['ch_names'] = [c['ch_name'] for c in chs]
-
-
-def _recursive_flatten(cell, dtype):
-    """Helper to unpack mat files in Python"""
-    while not isinstance(cell[0], dtype):
-        cell = [c for d in cell for c in d]
-    return cell
-
-
-def read_ch_connectivity(fname, picks=None):
-    """Parse FieldTrip neighbors .mat file
-
-    Parameters
-    ----------
-    fname : str
-        The file name.
-    picks : array-like of int, shape (n_channels)
-        The indices of the channels to include. Must match the template.
-        Defaults to None.
-
-    Returns
-    -------
-    ch_connectivity : scipy.sparse matrix
-        The connectivity matrix.
-    """
-    nb = loadmat(fname)['neighbours']
-    ch_names = _recursive_flatten(nb['label'], string_types)
-    neighbors = [_recursive_flatten(c, string_types) for c in
-                 nb['neighblabel'].flatten()]
-    assert len(ch_names) == len(neighbors)
-    if picks is not None:
-        if max(picks) >= len(ch_names):
-            raise ValueError('The picks must be compatible with '
-                             'channels. Found a pick ({}) which exceeds '
-                             'the channel range ({})'
-                             .format(max(picks), len(ch_names)))
-    connectivity = ch_neighbor_connectivity(ch_names, neighbors)
-    if picks is not None:
-        # picking before constructing matrix is buggy
-        connectivity = connectivity[picks][:, picks]
-    return connectivity
-
-
-def ch_neighbor_connectivity(ch_names, neighbors):
-    """Compute sensor connectivity matrix
-
-    Parameters
-    ----------
-    ch_names : list of str
-        The channel names.
-    neighbors : list of list
-        A list of list of channel names. The neighbors to
-        which the channels in ch_names are connected with.
-        Must be of the same length as ch_names.
-    Returns
-    -------
-    ch_connectivity : scipy.sparse matrix
-        The connectivity matrix.
-    """
-    if len(ch_names) != len(neighbors):
-        raise ValueError('`ch_names` and `neighbors` must '
-                         'have the same length')
-    set_neighbors = set([c for d in neighbors for c in d])
-    rest = set(ch_names) - set_neighbors
-    if len(rest) > 0:
-        raise ValueError('Some of your neighbors are not present in the '
-                         'list of channel names')
-
-    for neigh in neighbors:
-        if (not isinstance(neigh, list) or
-                not all(isinstance(c, string_types) for c in neigh)):
-            raise ValueError('`neighbors` must be a list of lists of str')
-
-    ch_connectivity = np.eye(len(ch_names), dtype=bool)
-    for ii, neighbs in enumerate(neighbors):
-        ch_connectivity[ii, [ch_names.index(i) for i in neighbs]] = True
-
-    ch_connectivity = sparse.csr_matrix(ch_connectivity)
-    return ch_connectivity
diff --git a/mne/channels/__init__.py b/mne/channels/__init__.py
new file mode 100644
index 0000000..025538f
--- /dev/null
+++ b/mne/channels/__init__.py
@@ -0,0 +1,11 @@
+"""
+Module dedicated to the manipulation of channels and to setting
+sensor locations used for processing and plotting.
+"""
+
+from .layout import (Layout, make_eeg_layout, make_grid_layout, read_layout,
+                     find_layout, generate_2d_layout)
+from .montage import read_montage, read_dig_montage, Montage, DigMontage
+
+from .channels import (equalize_channels, rename_channels, fix_mag_coil_types,
+                       read_ch_connectivity, _get_ch_type)
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
new file mode 100644
index 0000000..514930d
--- /dev/null
+++ b/mne/channels/channels.py
@@ -0,0 +1,783 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os
+import os.path as op
+import warnings
+
+import numpy as np
+from scipy import sparse
+
+from ..externals.six import string_types
+
+from ..utils import verbose, logger
+from ..io.pick import (channel_type, pick_info, pick_types,
+                       _check_excludes_includes)
+from ..io.constants import FIFF
+
+
+def _get_meg_system(info):
+    """Educated guess for the helmet type based on channels"""
+    system = '306m'
+    for ch in info['chs']:
+        if ch['kind'] == FIFF.FIFFV_MEG_CH:
+            coil_type = ch['coil_type'] & 0xFFFF
+            if coil_type == FIFF.FIFFV_COIL_NM_122:
+                system = '122m'
+                break
+            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
+                system = '306m'
+                break
+            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
+                  coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
+                nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
+                               for c in info['chs']])
+                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
+                system = 'CTF_275'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
+                system = 'KIT'
+                break
+            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
+                system = 'BabySQUID'
+                break
+    return system
+
+
+def _contains_ch_type(info, ch_type):
+    """Check whether a certain channel type is in an info object
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement information.
+    ch_type : str
+        The channel type to be checked for.
+
+    Returns
+    -------
+    has_ch_type : bool
+        Whether the channel type is present or not.
+    """
+    if not isinstance(ch_type, string_types):
+        raise ValueError('`ch_type` is of class {actual_class}. It must be '
+                         '`str`'.format(actual_class=type(ch_type)))
+
+    valid_channel_types = ['grad', 'mag', 'planar1', 'planar2', 'eeg', 'stim',
+                           'eog', 'emg', 'ecg', 'ref_meg', 'resp', 'exci',
+                           'ias', 'syst', 'seeg', 'misc']
+
+    if ch_type not in valid_channel_types:
+        raise ValueError('ch_type must be one of %s, not "%s"'
+                         % (valid_channel_types, ch_type))
+    if info is None:
+        raise ValueError('Cannot check for channels of type "%s" because info '
+                         'is None' % (ch_type,))
+    return ch_type in [channel_type(info, ii) for ii in range(info['nchan'])]
+
+
+def _get_ch_type(inst, ch_type):
+    """Helper to choose a single channel type (usually for plotting)
+
+    Usually used in plotting to plot a single datatype, e.g. look for mags,
+    then grads, then ... to plot.
+    """
+    if ch_type is None:
+        for type_ in ['mag', 'grad', 'planar1', 'planar2', 'eeg']:
+            if type_ in inst:
+                ch_type = type_
+                break
+        else:
+            raise RuntimeError('No plottable channel types found')
+    return ch_type
+
+
+ at verbose
+def equalize_channels(candidates, verbose=None):
+    """Equalize channel picks for a collection of MNE-Python objects
+
+    Parameters
+    ----------
+    candidates : list
+        List of Raw, Epochs, Evoked, or AverageTFR instances.
+    verbose : None | bool
+        Whether to be verbose or not.
+
+    Notes
+    -----
+    This function operates in place.
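+
+    Examples
+    --------
+    A minimal sketch (``epochs`` and ``evoked`` are illustrative instances
+    assumed to share a subset of channels)::
+
+        equalize_channels([epochs, evoked])  # non-common channels dropped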
+    """
+    from ..io.base import _BaseRaw
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+    from ..time_frequency import AverageTFR
+
+    if not all(isinstance(c, (_BaseRaw, _BaseEpochs, Evoked, AverageTFR))
+               for c in candidates):
+        valid = ['Raw', 'Epochs', 'Evoked', 'AverageTFR']
+        raise ValueError('candidates must be ' + ' or '.join(valid))
+
+    chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
+    chan_template = candidates[chan_max_idx].ch_names
+    logger.info('Identifying common channels ...')
+    channels = [set(c.ch_names) for c in candidates]
+    common_channels = set(chan_template).intersection(*channels)
+    dropped = list()
+    for c in candidates:
+        drop_them = list(set(c.ch_names) - common_channels)
+        if drop_them:
+            c.drop_channels(drop_them)
+            dropped.extend(drop_them)
+    if dropped:
+        dropped = list(set(dropped))
+        logger.info('Dropped the following channels:\n%s' % dropped)
+    else:
+        logger.info('All channels match; nothing to do.')
+
+
+class ContainsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def __contains__(self, ch_type):
+        """Check channel type membership"""
+        if ch_type == 'meg':
+            has_ch_type = (_contains_ch_type(self.info, 'mag') or
+                           _contains_ch_type(self.info, 'grad'))
+        else:
+            has_ch_type = _contains_ch_type(self.info, ch_type)
+        return has_ch_type
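+
+# Illustrative usage of ContainsMixin (``inst`` stands for any Raw, Epochs,
+# or Evoked instance): ``'meg' in inst`` is True when any mag or grad
+# channel is present, e.g.::
+#
+#     if 'eeg' in inst:
+#         picks = pick_types(inst.info, meg=False, eeg=True)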
+
+
+class SetChannelsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+    def _get_channel_positions(self, picks=None):
+        """Gets channel locations from info
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            Indices of channels to include. If None (default), all MEG and
+            EEG channels that are available are returned (bad channels
+            excluded).
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True)
+        chs = self.info['chs']
+        pos = np.array([chs[k]['loc'][:3] for k in picks])
+        n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
+        if n_zero > 1:  # XXX some systems have origin (0, 0, 0)
+            raise ValueError('Could not extract channel positions for '
+                             '{} channels'.format(n_zero))
+        return pos
+
+    def _set_channel_positions(self, pos, names):
+        """Update channel locations in info
+
+        Parameters
+        ----------
+        pos : array-like | np.ndarray, shape (n_points, 3)
+            The channel positions to be set.
+        names : list of str
+            The names of the channels to be set.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        if len(pos) != len(names):
+            raise ValueError('Number of channel positions not equal to '
+                             'the number of names given.')
+        pos = np.asarray(pos, dtype=np.float)
+        if pos.shape[-1] != 3 or pos.ndim != 2:
+            msg = ('Channel positions must have the shape (n_points, 3) '
+                   'not %s.' % (pos.shape,))
+            raise ValueError(msg)
+        for name, p in zip(names, pos):
+            if name in self.ch_names:
+                idx = self.ch_names.index(name)
+                self.info['chs'][idx]['loc'][:3] = p
+            else:
+                msg = ('%s was not found in the info. Cannot be updated.'
+                       % name)
+                raise ValueError(msg)
+
+    def set_channel_types(self, mapping):
+        """Define the sensor type of channels.
+
+        Note: The following sensor types are accepted:
+            ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst
+
+        Parameters
+        ----------
+        mapping : dict
+            A dictionary mapping a channel name to a sensor type (str),
+            e.g. {'EEG061': 'eog'}.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
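+
+        Examples
+        --------
+        A minimal sketch (the channel name is illustrative)::
+
+            inst.set_channel_types({'EEG061': 'eog'})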
+        """
+        human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
+                      'eeg': FIFF.FIFFV_EEG_CH,
+                      'emg': FIFF.FIFFV_EMG_CH,
+                      'eog': FIFF.FIFFV_EOG_CH,
+                      'exci': FIFF.FIFFV_EXCI_CH,
+                      'ias': FIFF.FIFFV_IAS_CH,
+                      'misc': FIFF.FIFFV_MISC_CH,
+                      'resp': FIFF.FIFFV_RESP_CH,
+                      'seeg': FIFF.FIFFV_SEEG_CH,
+                      'stim': FIFF.FIFFV_STIM_CH,
+                      'syst': FIFF.FIFFV_SYST_CH}
+
+        human2unit = {'ecg': FIFF.FIFF_UNIT_V,
+                      'eeg': FIFF.FIFF_UNIT_V,
+                      'emg': FIFF.FIFF_UNIT_V,
+                      'eog': FIFF.FIFF_UNIT_V,
+                      'exci': FIFF.FIFF_UNIT_NONE,
+                      'ias': FIFF.FIFF_UNIT_NONE,
+                      'misc': FIFF.FIFF_UNIT_V,
+                      'resp': FIFF.FIFF_UNIT_NONE,
+                      'seeg': FIFF.FIFF_UNIT_V,
+                      'stim': FIFF.FIFF_UNIT_NONE,
+                      'syst': FIFF.FIFF_UNIT_NONE}
+
+        unit2human = {FIFF.FIFF_UNIT_V: 'V',
+                      FIFF.FIFF_UNIT_NONE: 'NA'}
+        ch_names = self.info['ch_names']
+
+        # first check and assemble clean mappings of index and name
+        for ch_name, ch_type in mapping.items():
+            if ch_name not in ch_names:
+                raise ValueError("This channel name (%s) doesn't exist in "
+                                 "info." % ch_name)
+
+            c_ind = ch_names.index(ch_name)
+            if ch_type not in human2fiff:
+                raise ValueError('This function cannot change to this '
+                                 'channel type: %s. Accepted channel types '
+                                 'are %s.' % (ch_type,
+                                              ", ".join(human2unit.keys())))
+            # Set sensor type
+            self.info['chs'][c_ind]['kind'] = human2fiff[ch_type]
+            unit_old = self.info['chs'][c_ind]['unit']
+            unit_new = human2unit[ch_type]
+            if unit_old != human2unit[ch_type]:
+                warnings.warn("The unit for Channel %s has changed "
+                              "from %s to %s." % (ch_name,
+                                                  unit2human[unit_old],
+                                                  unit2human[unit_new]))
+            self.info['chs'][c_ind]['unit'] = human2unit[ch_type]
+            if ch_type in ['eeg', 'seeg']:
+                self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_EEG
+            else:
+                self.info['chs'][c_ind]['coil_type'] = FIFF.FIFFV_COIL_NONE
+
+    def rename_channels(self, mapping):
+        """Rename channels.
+
+        Parameters
+        ----------
+        mapping : dict | callable
+            A dictionary mapping the old channel name to a new channel name,
+            e.g. {'EEG061': 'EEG161'}. Can also be a callable function
+            that takes and returns a string (new in version 0.10.0).
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
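+
+        Examples
+        --------
+        Minimal sketches (the channel name is illustrative); a callable
+        is applied to every channel name::
+
+            inst.rename_channels({'EEG061': 'EEG161'})
+            inst.rename_channels(lambda name: name.replace(' ', ''))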
+        """
+        rename_channels(self.info, mapping)
+
+    def set_montage(self, montage):
+        """Set EEG sensor configuration
+
+        Parameters
+        ----------
+        montage : instance of Montage or DigMontage
+            The montage to use.
+
+        Notes
+        -----
+        Operates in place.
+
+        .. versionadded:: 0.9.0
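+
+        Examples
+        --------
+        A minimal sketch using one of the built-in montages::
+
+            from mne.channels import read_montage
+            inst.set_montage(read_montage('standard_1020'))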
+        """
+        from .montage import _set_montage
+        _set_montage(self.info, montage)
+
+
+class UpdateChannelsMixin(object):
+    """Mixin class for Raw, Evoked, Epochs, AverageTFR
+    """
+    def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
+                   ecg=False, emg=False, ref_meg='auto', misc=False,
+                   resp=False, chpi=False, exci=False, ias=False, syst=False,
+                   seeg=False, include=[], exclude='bads', selection=None,
+                   copy=False):
+        """Pick some channels by type and names
+
+        Parameters
+        ----------
+        meg : bool | str
+            If True, include all MEG channels. If False, include none.
+            If a string, it can be 'mag', 'grad', 'planar1' or 'planar2' to
+            select only magnetometers, all gradiometers, or a specific
+            type of gradiometer.
+        eeg : bool
+            If True include EEG channels.
+        stim : bool
+            If True include stimulus channels.
+        eog : bool
+            If True include EOG channels.
+        ecg : bool
+            If True include ECG channels.
+        emg : bool
+            If True include EMG channels.
+        ref_meg : bool | str
+            If True include CTF / 4D reference channels. If 'auto', the
+            reference channels are only included if compensations are present.
+        misc : bool
+            If True include miscellaneous analog channels.
+        resp : bool
+            If True include response-trigger channel. For some MEG systems this
+            is separate from the stim channel.
+        chpi : bool
+            If True include continuous HPI coil channels.
+        exci : bool
+            Flux excitation channel (it used to be a stimulus channel).
+        ias : bool
+            Internal Active Shielding data (maybe on Triux only).
+        syst : bool
+            System status channel information (on Triux systems only).
+        seeg : bool
+            Stereotactic EEG channels.
+        include : list of string
+            List of additional channels to include. If empty do not include
+            any.
+        exclude : list of string | str
+            List of channels to exclude. If 'bads' (default), exclude channels
+            in ``info['bads']``.
+        selection : list of string
+            Restrict sensor channels (MEG, EEG) to this list of channel names.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
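+
+        Examples
+        --------
+        A minimal sketch (assumes the data are preloaded)::
+
+            inst.pick_types(meg='grad', eeg=True, eog=True)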
+        """
+        inst = self.copy() if copy else self
+        idx = pick_types(
+            self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
+            ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
+            ias=ias, syst=syst, seeg=seeg, include=include, exclude=exclude,
+            selection=selection)
+        inst._pick_drop_channels(idx)
+        return inst
+
+    def pick_channels(self, ch_names, copy=False):
+        """Pick some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to select.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        See Also
+        --------
+        drop_channels
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
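+
+        Examples
+        --------
+        A minimal sketch (the channel names are illustrative)::
+
+            inst.pick_channels(['MEG 0111', 'MEG 0112'])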
+        """
+        inst = self.copy() if copy else self
+        _check_excludes_includes(ch_names)
+
+        idx = [inst.ch_names.index(c) for c in ch_names if c in inst.ch_names]
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def drop_channels(self, ch_names, copy=False):
+        """Drop some channels
+
+        Parameters
+        ----------
+        ch_names : list
+            The list of channels to remove.
+        copy : bool
+            If True, returns new instance. Else, modifies in place. Defaults to
+            False.
+
+        See Also
+        --------
+        pick_channels
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
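+
+        Examples
+        --------
+        A minimal sketch (the channel name is illustrative)::
+
+            inst.drop_channels(['EOG 061'])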
+        """
+        inst = self.copy() if copy else self
+
+        bad_idx = [inst.ch_names.index(c) for c in ch_names
+                   if c in inst.ch_names]
+        idx = np.setdiff1d(np.arange(len(inst.ch_names)), bad_idx)
+        inst._pick_drop_channels(idx)
+
+        return inst
+
+    def _pick_drop_channels(self, idx):
+        # avoid circular imports
+        from ..io.base import _BaseRaw
+        from ..epochs import _BaseEpochs
+        from ..evoked import Evoked
+        from ..time_frequency import AverageTFR
+
+        if isinstance(self, (_BaseRaw, _BaseEpochs)):
+            if not self.preload:
+                raise RuntimeError('If Raw or Epochs, data must be preloaded '
+                                   'to drop or pick channels')
+
+        def inst_has(attr):
+            return getattr(self, attr, None) is not None
+
+        if inst_has('picks'):
+            self.picks = self.picks[idx]
+
+        if inst_has('_cals'):
+            self._cals = self._cals[idx]
+
+        self.info = pick_info(self.info, idx, copy=False)
+
+        if inst_has('_projector'):
+            self._projector = self._projector[idx][:, idx]
+
+        if isinstance(self, _BaseRaw) and inst_has('_data'):
+            self._data = self._data.take(idx, axis=0)
+        elif isinstance(self, _BaseEpochs) and inst_has('_data'):
+            self._data = self._data.take(idx, axis=1)
+        elif isinstance(self, AverageTFR) and inst_has('data'):
+            self.data = self.data.take(idx, axis=0)
+        elif isinstance(self, Evoked):
+            self.data = self.data.take(idx, axis=0)
+
+    def add_channels(self, add_list, copy=False):
+        """Append new channels to the instance.
+
+        Parameters
+        ----------
+        add_list : list
+            A list of objects to append to self. Must contain only objects
+            of the same type as the current object.
+        copy : bool
+            Whether to return a new instance or modify in place.
+
+        Returns
+        -------
+        out : MNE object of type(self)
+            An object with new channels appended (will be the same
+            object if copy==False)
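+
+        Examples
+        --------
+        A minimal sketch (``raw`` and ``raw_misc`` are illustrative,
+        preloaded Raw instances sharing the same time samples)::
+
+            raw.add_channels([raw_misc])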
+        """
+        # avoid circular imports
+        from ..io.base import _BaseRaw
+        from ..epochs import _BaseEpochs
+        from ..io.meas_info import _merge_info
+
+        if not isinstance(add_list, (list, tuple)):
+            raise AssertionError('Input must be a list or tuple of objs')
+
+        # Object-specific checks
+        if isinstance(self, (_BaseRaw, _BaseEpochs)):
+            if not all([inst.preload for inst in add_list] + [self.preload]):
+                raise AssertionError('All data must be preloaded')
+            data_name = '_data'
+            if isinstance(self, _BaseRaw):
+                con_axis = 0
+                comp_class = _BaseRaw
+            elif isinstance(self, _BaseEpochs):
+                con_axis = 1
+                comp_class = _BaseEpochs
+        else:
+            data_name = 'data'
+            con_axis = 0
+            comp_class = type(self)
+        if not all(isinstance(inst, comp_class) for inst in add_list):
+            raise AssertionError('All input data must be of same type')
+        data = [getattr(inst, data_name) for inst in [self] + add_list]
+
+        # Make sure that all dimensions other than channel axis are the same
+        compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
+        shapes = np.array([dat.shape for dat in data])[:, compare_axes]
+        if not ((shapes[0] - shapes) == 0).all():
+            raise AssertionError('All dimensions except channels must match')
+
+        # Create final data / info objects
+        data = np.concatenate(data, axis=con_axis)
+        infos = [self.info] + [inst.info for inst in add_list]
+        new_info = _merge_info(infos)
+
+        # Now update the attributes
+        if copy is True:
+            out = self.copy()
+        else:
+            out = self
+        setattr(out, data_name, data)
+        out.info = new_info
+        if isinstance(self, _BaseRaw):
+            out._cals = np.concatenate([getattr(inst, '_cals')
+                                        for inst in [self] + add_list])
+        return out
+
+
+class InterpolationMixin(object):
+    """Mixin class for Raw, Evoked, Epochs
+    """
+
+    def interpolate_bads(self, reset_bads=True, mode='accurate'):
+        """Interpolate bad MEG and EEG channels.
+
+        Operates in place.
+
+        Parameters
+        ----------
+        reset_bads : bool
+            If True, remove the bads from info.
+        mode : str
+            Either `'accurate'` or `'fast'`, determines the quality of the
+            Legendre polynomial expansion used for interpolation of MEG
+            channels.
+
+        Returns
+        -------
+        self : mne.io.Raw, mne.Epochs or mne.Evoked
+            The interpolated data.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
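+
+        Examples
+        --------
+        A minimal sketch (assumes preloaded data; the bad channel name is
+        illustrative)::
+
+            inst.info['bads'] = ['MEG 2443']
+            inst.interpolate_bads(reset_bads=True)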
+        """
+        from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
+
+        if getattr(self, 'preload', None) is False:
+            raise ValueError('Data must be preloaded.')
+
+        _interpolate_bads_eeg(self)
+        _interpolate_bads_meg(self, mode=mode)
+
+        if reset_bads is True:
+            self.info['bads'] = []
+
+        return self
+
+
+def rename_channels(info, mapping):
+    """Rename channels.
+
+    Parameters
+    ----------
+    info : dict
+        Measurement info.
+    mapping : dict | callable
+        A dictionary mapping the old channel name to a new channel name,
+        e.g. {'EEG061': 'EEG161'}. Can also be a callable function that
+        takes and returns a string (new in version 0.10.0).
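+
+    Examples
+    --------
+    A minimal sketch (the channel names are illustrative)::
+
+        rename_channels(raw.info, {'EEG061': 'EEG161'})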
+    """
+    info._check_consistency()
+    bads = list(info['bads'])  # make our own local copies
+    ch_names = list(info['ch_names'])
+
+    # first check and assemble clean mappings of index and name
+    if isinstance(mapping, dict):
+        orig_names = sorted(list(mapping.keys()))
+        missing = [orig_name not in ch_names for orig_name in orig_names]
+        if any(missing):
+            raise ValueError("Channel name(s) in mapping missing from info: "
+                             "%s" % np.array(orig_names)[np.array(missing)])
+        new_names = [(ch_names.index(ch_name), new_name)
+                     for ch_name, new_name in mapping.items()]
+    elif callable(mapping):
+        new_names = [(ci, mapping(ch_name))
+                     for ci, ch_name in enumerate(ch_names)]
+    else:
+        raise ValueError('mapping must be callable or dict, not %s'
+                         % (type(mapping),))
+
+    # check we got all strings out of the mapping
+    if any(not isinstance(new_name[1], string_types)
+           for new_name in new_names):
+        raise ValueError('New channel mapping must only be to strings')
+
+    # do the remapping locally
+    for c_ind, new_name in new_names:
+        for bi, bad in enumerate(bads):
+            if bad == ch_names[c_ind]:
+                bads[bi] = new_name
+        ch_names[c_ind] = new_name
+
+    # check that all the channel names are unique
+    if len(ch_names) != len(np.unique(ch_names)):
+        raise ValueError('New channel names are not unique, renaming failed')
+
+    # do the remapping in info
+    info['bads'] = bads
+    info['ch_names'] = ch_names
+    for ch, ch_name in zip(info['chs'], ch_names):
+        ch['ch_name'] = ch_name
+    info._check_consistency()
+
+
+def _recursive_flatten(cell, dtype):
+    """Helper to unpack mat files in Python"""
+    while not isinstance(cell[0], dtype):
+        cell = [c for d in cell for c in d]
+    return cell
+
+
+def read_ch_connectivity(fname, picks=None):
+    """Parse FieldTrip neighbors .mat file
+
+    More information on these neighbor definitions can be found on the
+    related FieldTrip documentation pages:
+    http://fieldtrip.fcdonders.nl/template/neighbours
+
+    Parameters
+    ----------
+    fname : str
+        The file name. Examples: 'neuromag306mag', 'neuromag306planar',
+        'ctf275', 'biosemi64', etc.
+    picks : array-like of int, shape (n_channels,)
+        The indices of the channels to include. Must match the template.
+        Defaults to None.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    ch_names : list
+        The list of channel names present in the connectivity matrix.
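+
+    Examples
+    --------
+    A minimal sketch using one of the bundled template names::
+
+        connectivity, ch_names = read_ch_connectivity('neuromag306mag')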
+    """
+    from scipy.io import loadmat
+    if not op.isabs(fname):
+        templates_dir = op.realpath(op.join(op.dirname(__file__),
+                                            'data', 'neighbors'))
+        templates = os.listdir(templates_dir)
+        for f in templates:
+            if f == fname:
+                break
+            if f == fname + '_neighb.mat':
+                fname += '_neighb.mat'
+                break
+        else:
+            raise ValueError('I do not know about this neighbor '
+                             'template: "{}"'.format(fname))
+
+        fname = op.join(templates_dir, fname)
+
+    nb = loadmat(fname)['neighbours']
+    ch_names = _recursive_flatten(nb['label'], string_types)
+    neighbors = [_recursive_flatten(c, string_types) for c in
+                 nb['neighblabel'].flatten()]
+    assert len(ch_names) == len(neighbors)
+    if picks is not None:
+        if max(picks) >= len(ch_names):
+            raise ValueError('The picks must be compatible with '
+                             'channels. Found a pick ({}) which exceeds '
+                             'the channel range ({})'
+                             .format(max(picks), len(ch_names)))
+    connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
+    if picks is not None:
+        # picking before constructing matrix is buggy
+        connectivity = connectivity[picks][:, picks]
+        ch_names = [ch_names[p] for p in picks]
+    return connectivity, ch_names
+
+
+def _ch_neighbor_connectivity(ch_names, neighbors):
+    """Compute sensor connectivity matrix
+
+    Parameters
+    ----------
+    ch_names : list of str
+        The channel names.
+    neighbors : list of list
+        A list of list of channel names. The neighbors to
+        which the channels in ch_names are connected with.
+        Must be of the same length as ch_names.
+
+    Returns
+    -------
+    ch_connectivity : scipy.sparse matrix
+        The connectivity matrix.
+    """
+    if len(ch_names) != len(neighbors):
+        raise ValueError('`ch_names` and `neighbors` must '
+                         'have the same length')
+    set_neighbors = set([c for d in neighbors for c in d])
+    rest = set(ch_names) - set_neighbors
+    if len(rest) > 0:
+        raise ValueError('Some of your neighbors are not present in the '
+                         'list of channel names')
+
+    for neigh in neighbors:
+        if (not isinstance(neigh, list) or
+                not all(isinstance(c, string_types) for c in neigh)):
+            raise ValueError('`neighbors` must be a list of lists of str')
+
+    ch_connectivity = np.eye(len(ch_names), dtype=bool)
+    for ii, neighbs in enumerate(neighbors):
+        ch_connectivity[ii, [ch_names.index(i) for i in neighbs]] = True
+
+    ch_connectivity = sparse.csr_matrix(ch_connectivity)
+    return ch_connectivity
+
+
+def fix_mag_coil_types(info):
+    """Fix Elekta magnetometer coil types
+
+    Parameters
+    ----------
+    info : dict
+        The info dict to correct. Corrections are done in-place.
+
+    Notes
+    -----
+    This function changes magnetometer coil types 3022 (T1: SQ20483N) and
+    3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
+    records in the info structure.
+
+    Neuromag Vectorview systems can contain magnetometers with two
+    different coil sizes (3022 and 3023 vs. 3024). The systems
+    incorporating coils of type 3024 were introduced last and are used at
+    the majority of MEG sites. At some sites with 3024 magnetometers,
+    the data files still define the magnetometers to be of type
+    3022 to ensure compatibility with older versions of Neuromag software.
+    In the MNE software as well as in the present version of Neuromag
+    software coil type 3024 is fully supported. Therefore, it is now safe
+    to upgrade the data files to use the true coil type.
+
+    .. note:: The effect of the difference between the coil sizes on the
+              current estimates computed by the MNE software is very small.
+              Therefore the use of mne_fix_mag_coil_types is not mandatory.
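+
+    Examples
+    --------
+    Corrections are applied in place to the measurement info::
+
+        fix_mag_coil_types(raw.info)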
+    """
+    picks = pick_types(info, meg='mag')
+    for ii in picks:
+        ch = info['chs'][ii]
+        if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
+                               FIFF.FIFFV_COIL_VV_MAG_T2):
+            ch['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
+    info._check_consistency()
diff --git a/mne/layouts/CTF-275.lout b/mne/channels/data/layouts/CTF-275.lout
similarity index 100%
rename from mne/layouts/CTF-275.lout
rename to mne/channels/data/layouts/CTF-275.lout
diff --git a/mne/layouts/CTF151.lay b/mne/channels/data/layouts/CTF151.lay
similarity index 100%
rename from mne/layouts/CTF151.lay
rename to mne/channels/data/layouts/CTF151.lay
diff --git a/mne/layouts/CTF275.lay b/mne/channels/data/layouts/CTF275.lay
similarity index 100%
rename from mne/layouts/CTF275.lay
rename to mne/channels/data/layouts/CTF275.lay
diff --git a/mne/layouts/EEG1005.lay b/mne/channels/data/layouts/EEG1005.lay
similarity index 100%
rename from mne/layouts/EEG1005.lay
rename to mne/channels/data/layouts/EEG1005.lay
diff --git a/mne/layouts/EGI256.lout b/mne/channels/data/layouts/EGI256.lout
similarity index 100%
rename from mne/layouts/EGI256.lout
rename to mne/channels/data/layouts/EGI256.lout
diff --git a/mne/layouts/KIT-157.lout b/mne/channels/data/layouts/KIT-157.lout
similarity index 99%
rename from mne/layouts/KIT-157.lout
rename to mne/channels/data/layouts/KIT-157.lout
index 39d7d6b..2cf5637 100644
--- a/mne/layouts/KIT-157.lout
+++ b/mne/channels/data/layouts/KIT-157.lout
@@ -1,4 +1,4 @@
--42.19	43.52	-41.7	28.71	
+-42.19	43.52	-41.7	28.71
 001     9.78   -14.18     4.00     3.00 MEG 001
 002     3.31   -16.56     4.00     3.00 MEG 002
 003    12.02   -19.42     4.00     3.00 MEG 003
diff --git a/mne/channels/data/layouts/KIT-AD.lout b/mne/channels/data/layouts/KIT-AD.lout
new file mode 100644
index 0000000..e06356a
--- /dev/null
+++ b/mne/channels/data/layouts/KIT-AD.lout
@@ -0,0 +1,209 @@
+    0.00     1.00     0.00     1.00
+001     0.61     0.56     0.02     0.04 MEG 001
+002     0.59     0.50     0.02     0.04 MEG 002
+003     0.48     0.42     0.02     0.04 MEG 003
+004     0.52     0.43     0.02     0.04 MEG 004
+005     0.43     0.44     0.02     0.04 MEG 005
+006     0.39     0.48     0.02     0.04 MEG 006
+007     0.52     0.70     0.02     0.04 MEG 007
+008     0.58     0.59     0.02     0.04 MEG 008
+009     0.47     0.71     0.02     0.04 MEG 009
+010     0.53     0.49     0.02     0.04 MEG 010
+011     0.57     0.53     0.02     0.04 MEG 011
+012     0.43     0.50     0.02     0.04 MEG 012
+013     0.40     0.55     0.02     0.04 MEG 013
+014     0.57     0.39     0.02     0.04 MEG 014
+015     0.38     0.41     0.02     0.04 MEG 015
+016     0.48     0.37     0.02     0.04 MEG 016
+017     0.16     0.84     0.02     0.04 MEG 017
+018     0.53     0.63     0.02     0.04 MEG 018
+019     0.48     0.53     0.02     0.04 MEG 019
+020     0.44     0.63     0.02     0.04 MEG 020
+021     0.53     0.56     0.02     0.04 MEG 021
+022     0.44     0.57     0.02     0.04 MEG 022
+023     0.56     0.46     0.02     0.04 MEG 023
+024     0.59     0.68     0.02     0.04 MEG 024
+025     0.34     0.86     0.02     0.04 MEG 025
+026     0.39     0.89     0.02     0.04 MEG 026
+027     0.50     0.91     0.02     0.04 MEG 027
+028     0.61     0.87     0.02     0.04 MEG 028
+029     0.66     0.84     0.02     0.04 MEG 029
+030     0.59     0.76     0.02     0.04 MEG 030
+031     0.39     0.62     0.02     0.04 MEG 031
+032     0.55     0.85     0.02     0.04 MEG 032
+033     0.28     0.39     0.02     0.04 MEG 033
+034     0.37     0.52     0.02     0.04 MEG 034
+035     0.36     0.59     0.02     0.04 MEG 035
+036     0.38     0.70     0.02     0.04 MEG 036
+037     0.07     0.87     0.02     0.04 MEG 037
+038     0.24     0.61     0.02     0.04 MEG 038
+039     0.32     0.68     0.02     0.04 MEG 039
+040     0.30     0.81     0.02     0.04 MEG 040
+041     0.43     0.96     0.02     0.04 MEG 041
+042     0.55     0.95     0.02     0.04 MEG 042
+043     0.42     0.74     0.02     0.04 MEG 043
+044     0.56     0.72     0.02     0.04 MEG 044
+045     0.47     0.76     0.02     0.04 MEG 045
+046     0.52     0.75     0.02     0.04 MEG 046
+047     0.45     0.85     0.02     0.04 MEG 047
+048     0.40     0.79     0.02     0.04 MEG 048
+049     0.24     0.79     0.02     0.04 MEG 049
+050     0.21     0.46     0.02     0.04 MEG 050
+051     0.32     0.76     0.02     0.04 MEG 051
+052     0.20     0.63     0.02     0.04 MEG 052
+053     0.27     0.33     0.02     0.04 MEG 053
+054     0.17     0.74     0.02     0.04 MEG 054
+055     0.05     0.65     0.02     0.04 MEG 055
+056     0.28     0.63     0.02     0.04 MEG 056
+057     0.70     0.62     0.02     0.04 MEG 057
+058     0.94     0.38     0.02     0.04 MEG 058
+059     0.91     0.73     0.02     0.04 MEG 059
+060     0.82     0.93     0.02     0.04 MEG 060
+061     0.93     0.63     0.02     0.04 MEG 061
+062     0.75     0.78     0.02     0.04 MEG 062
+063     0.69     0.78     0.02     0.04 MEG 063
+064     0.43     0.00     0.02     0.04 MEG 064
+065     0.18     0.40     0.02     0.04 MEG 065
+066     0.19     0.29     0.02     0.04 MEG 066
+067     0.15     0.56     0.02     0.04 MEG 067
+068     0.33     0.53     0.02     0.04 MEG 068
+069     0.35     0.47     0.02     0.04 MEG 069
+070     0.25     0.89     0.02     0.04 MEG 070
+071     0.24     0.53     0.02     0.04 MEG 071
+072     0.16     0.95     0.02     0.04 MEG 072
+073     0.67     0.75     0.02     0.04 MEG 073
+074     0.74     0.86     0.02     0.04 MEG 074
+075     0.81     0.71     0.02     0.04 MEG 075
+076     0.78     0.62     0.02     0.04 MEG 076
+077     0.65     0.65     0.02     0.04 MEG 077
+078     0.83     0.81     0.02     0.04 MEG 078
+079     0.82     0.53     0.02     0.04 MEG 079
+080     0.78     0.36     0.02     0.04 MEG 080
+081     0.56     0.65     0.02     0.04 MEG 081
+082     0.35     0.74     0.02     0.04 MEG 082
+083     0.21     0.71     0.02     0.04 MEG 083
+084     0.12     0.75     0.02     0.04 MEG 084
+085     0.11     0.66     0.02     0.04 MEG 085
+086     0.21     0.92     0.02     0.04 MEG 086
+087     0.13     0.96     0.02     0.04 MEG 087
+088     0.03     0.76     0.02     0.04 MEG 088
+089     0.66     0.89     0.02     0.04 MEG 089
+090     0.61     0.93     0.02     0.04 MEG 090
+091     0.63     0.79     0.02     0.04 MEG 091
+092     0.71     0.84     0.02     0.04 MEG 092
+093     0.44     0.91     0.02     0.04 MEG 093
+094     0.56     0.89     0.02     0.04 MEG 094
+095     0.42     0.68     0.02     0.04 MEG 095
+096     0.54     0.79     0.02     0.04 MEG 096
+097     0.11     0.86     0.02     0.04 MEG 097
+098     0.14     0.36     0.02     0.04 MEG 098
+099     0.32     0.60     0.02     0.04 MEG 099
+100     0.25     0.45     0.02     0.04 MEG 100
+101     0.19     0.54     0.02     0.04 MEG 101
+102     0.27     0.85     0.02     0.04 MEG 102
+103     0.27     0.75     0.02     0.04 MEG 103
+104     0.01     0.64     0.02     0.04 MEG 104
+105     0.69     0.68     0.02     0.04 MEG 105
+106     0.88     0.82     0.02     0.04 MEG 106
+107     0.45     0.80     0.02     0.04 MEG 107
+108     0.50     0.86     0.02     0.04 MEG 108
+109     0.36     0.80     0.02     0.04 MEG 109
+110     0.49     0.96     0.02     0.04 MEG 110
+111     0.37     0.93     0.02     0.04 MEG 111
+112     0.32     0.90     0.02     0.04 MEG 112
+113     0.07     0.42     0.02     0.04 MEG 113
+114     0.73     0.72     0.02     0.04 MEG 114
+115     0.19     0.12     0.02     0.04 MEG 115
+116     0.01     0.51     0.02     0.04 MEG 116
+117     0.07     0.29     0.02     0.04 MEG 117
+118     0.16     0.47     0.02     0.04 MEG 118
+119     0.22     0.33     0.02     0.04 MEG 119
+120     0.10     0.54     0.02     0.04 MEG 120
+121     0.78     0.89     0.02     0.04 MEG 121
+122     0.87     0.63     0.02     0.04 MEG 122
+123     0.86     0.72     0.02     0.04 MEG 123
+124     0.77     0.70     0.02     0.04 MEG 124
+125     0.63     0.71     0.02     0.04 MEG 125
+126     0.89     0.27     0.02     0.04 MEG 126
+127     0.97     0.62     0.02     0.04 MEG 127
+128     0.83     0.62     0.02     0.04 MEG 128
+129     0.77     0.11     0.02     0.04 MEG 129
+130     0.86     0.95     0.02     0.04 MEG 130
+131     0.71     0.42     0.02     0.04 MEG 131
+132     0.78     0.53     0.02     0.04 MEG 132
+133     0.65     0.57     0.02     0.04 MEG 133
+134     0.16     0.67     0.02     0.04 MEG 134
+135     0.29     0.71     0.02     0.04 MEG 135
+136     0.16     0.23     0.02     0.04 MEG 136
+137     0.82     0.34     0.02     0.04 MEG 137
+138     0.87     0.52     0.02     0.04 MEG 138
+139     0.81     0.22     0.02     0.04 MEG 139
+140     0.90     0.40     0.02     0.04 MEG 140
+141     0.97     0.49     0.02     0.04 MEG 141
+142     0.74     0.30     0.02     0.04 MEG 142
+143     0.81     0.44     0.02     0.04 MEG 143
+144     0.95     0.75     0.02     0.04 MEG 144
+145     0.13     0.19     0.02     0.04 MEG 145
+146     0.28     0.56     0.02     0.04 MEG 146
+147     0.74     0.15     0.02     0.04 MEG 147
+148     0.10     0.33     0.02     0.04 MEG 148
+149     0.35     0.02     0.02     0.04 MEG 149
+150     0.03     0.39     0.02     0.04 MEG 150
+151     0.27     0.06     0.02     0.04 MEG 151
+152     0.31     0.43     0.02     0.04 MEG 152
+153     0.77     0.26     0.02     0.04 MEG 153
+154     0.67     0.10     0.02     0.04 MEG 154
+155     0.76     0.44     0.02     0.04 MEG 155
+156     0.83     0.18     0.02     0.04 MEG 156
+157     0.61     0.02     0.02     0.04 MEG 157
+158     0.91     0.86     0.02     0.04 MEG 158
+159     0.92     0.51     0.02     0.04 MEG 159
+160     0.86     0.30     0.02     0.04 MEG 160
+161     0.44     0.12     0.02     0.04 MEG 161
+162     0.37     0.30     0.02     0.04 MEG 162
+163     0.30     0.17     0.02     0.04 MEG 163
+164     0.36     0.25     0.02     0.04 MEG 164
+165     0.41     0.22     0.02     0.04 MEG 165
+166     0.31     0.28     0.02     0.04 MEG 166
+167     0.05     0.53     0.02     0.04 MEG 167
+168     0.08     0.76     0.02     0.04 MEG 168
+169     0.69     0.24     0.02     0.04 MEG 169
+170     0.57     0.18     0.02     0.04 MEG 170
+171     0.50     0.17     0.02     0.04 MEG 171
+172     0.64     0.20     0.02     0.04 MEG 172
+173     0.65     0.42     0.02     0.04 MEG 173
+174     0.69     0.53     0.02     0.04 MEG 174
+175     0.61     0.44     0.02     0.04 MEG 175
+176     0.70     0.32     0.02     0.04 MEG 176
+177     0.44     0.17     0.02     0.04 MEG 177
+178     0.38     0.18     0.02     0.04 MEG 178
+179     0.32     0.22     0.02     0.04 MEG 179
+180     0.44     0.06     0.02     0.04 MEG 180
+181     0.22     0.16     0.02     0.04 MEG 181
+182     0.36     0.07     0.02     0.04 MEG 182
+183     0.28     0.11     0.02     0.04 MEG 183
+184     0.42     0.27     0.02     0.04 MEG 184
+185     0.52     0.32     0.02     0.04 MEG 185
+186     0.57     0.33     0.02     0.04 MEG 186
+187     0.47     0.32     0.02     0.04 MEG 187
+188     0.62     0.37     0.02     0.04 MEG 188
+189     0.73     0.49     0.02     0.04 MEG 189
+190     0.67     0.36     0.02     0.04 MEG 190
+191     0.74     0.57     0.02     0.04 MEG 191
+192     0.64     0.49     0.02     0.04 MEG 192
+193     0.59     0.06     0.02     0.04 MEG 193
+194     0.52    -0.00     0.02     0.04 MEG 194
+195     0.58     0.29     0.02     0.04 MEG 195
+196     0.53     0.27     0.02     0.04 MEG 196
+197     0.47     0.26     0.02     0.04 MEG 197
+198     0.34     0.39     0.02     0.04 MEG 198
+199     0.42     0.33     0.02     0.04 MEG 199
+200     0.38     0.35     0.02     0.04 MEG 200
+201     0.53     0.22     0.02     0.04 MEG 201
+202     0.59     0.24     0.02     0.04 MEG 202
+203     0.65     0.27     0.02     0.04 MEG 203
+204     0.27     0.26     0.02     0.04 MEG 204
+205     0.51     0.11     0.02     0.04 MEG 205
+206     0.65     0.15     0.02     0.04 MEG 206
+207     0.51     0.05     0.02     0.04 MEG 207
+208     0.69     0.05     0.02     0.04 MEG 208
diff --git a/mne/layouts/Vectorview-all.lout b/mne/channels/data/layouts/Vectorview-all.lout
similarity index 100%
rename from mne/layouts/Vectorview-all.lout
rename to mne/channels/data/layouts/Vectorview-all.lout
diff --git a/mne/layouts/Vectorview-grad.lout b/mne/channels/data/layouts/Vectorview-grad.lout
similarity index 100%
rename from mne/layouts/Vectorview-grad.lout
rename to mne/channels/data/layouts/Vectorview-grad.lout
diff --git a/mne/layouts/Vectorview-mag.lout b/mne/channels/data/layouts/Vectorview-mag.lout
similarity index 100%
rename from mne/layouts/Vectorview-mag.lout
rename to mne/channels/data/layouts/Vectorview-mag.lout
diff --git a/mne/channels/data/layouts/biosemi.lay b/mne/channels/data/layouts/biosemi.lay
new file mode 100644
index 0000000..ca74816
--- /dev/null
+++ b/mne/channels/data/layouts/biosemi.lay
@@ -0,0 +1,64 @@
+1	-0.496189	1.527114	0.290000	0.230000	Fp1
+2	-0.943808	1.299041	0.290000	0.230000	AF7
+3	-0.545830	1.170536	0.290000	0.230000	AF3
+4	-0.326906	0.809121	0.290000	0.230000	F1
+5	-0.659023	0.813825	0.290000	0.230000	F3
+6	-0.987913	0.858779	0.290000	0.230000	F5
+7	-1.299041	0.943808	0.290000	0.230000	F7
+8	-1.527114	0.496189	0.290000	0.230000	FT7
+9	-1.173172	0.450338	0.290000	0.230000	FC5
+10	-0.770517	0.409691	0.290000	0.230000	FC3
+11	-0.394923	0.394923	0.290000	0.230000	FC1
+12	-0.401426	-0.000000	0.290000	0.230000	C1
+13	-0.802851	-0.000000	0.290000	0.230000	C3
+14	-1.204277	-0.000000	0.290000	0.230000	C5
+15	-1.605703	-0.000000	0.290000	0.230000	T7
+16	-1.527114	-0.496189	0.290000	0.230000	TP7
+17	-1.173172	-0.450338	0.290000	0.230000	CP5
+18	-0.770517	-0.409691	0.290000	0.230000	CP3
+19	-0.394923	-0.394923	0.290000	0.230000	CP1
+20	-0.326906	-0.809121	0.290000	0.230000	P1
+21	-0.659023	-0.813825	0.290000	0.230000	P3
+22	-0.987913	-0.858779	0.290000	0.230000	P5
+23	-1.299041	-0.943808	0.290000	0.230000	P7
+24	-1.537550	-1.290157	0.290000	0.230000	P9
+25	-0.943808	-1.299041	0.290000	0.230000	PO7
+26	-0.545830	-1.170536	0.290000	0.230000	PO3
+27	-0.496189	-1.527114	0.290000	0.230000	O1
+28	0.000000	-2.007129	0.290000	0.230000	Iz
+29	0.000000	-1.605703	0.290000	0.230000	Oz
+30	0.000000	-1.204277	0.290000	0.230000	POz
+31	0.000000	-0.802851	0.290000	0.230000	Pz
+32	0.000000	-0.401426	0.290000	0.230000	CPz
+33	0.000000	1.605703	0.290000	0.230000	Fpz
+34	0.496189	1.527114	0.290000	0.230000	Fp2
+35	0.943808	1.299041	0.290000	0.230000	AF8
+36	0.545830	1.170536	0.290000	0.230000	AF4
+37	0.000000	1.204277	0.290000	0.230000	AFz
+38	0.000000	0.802851	0.290000	0.230000	Fz
+39	0.326906	0.809121	0.290000	0.230000	F2
+40	0.659023	0.813825	0.290000	0.230000	F4
+41	0.987913	0.858779	0.290000	0.230000	F6
+42	1.299041	0.943808	0.290000	0.230000	F8
+43	1.527114	0.496189	0.290000	0.230000	FT8
+44	1.173172	0.450338	0.290000	0.230000	FC6
+45	0.770517	0.409691	0.290000	0.230000	FC4
+46	0.394923	0.394923	0.290000	0.230000	FC2
+47	0.000000	0.401426	0.290000	0.230000	FCz
+48	0.000000	0.000000	0.290000	0.230000	Cz
+49	0.401426	0.000000	0.290000	0.230000	C2
+50	0.802851	0.000000	0.290000	0.230000	C4
+51	1.204277	0.000000	0.290000	0.230000	C6
+52	1.605703	0.000000	0.290000	0.230000	T8
+53	1.527114	-0.496189	0.290000	0.230000	TP8
+54	1.173172	-0.450338	0.290000	0.230000	CP6
+55	0.770517	-0.409691	0.290000	0.230000	CP4
+56	0.394923	-0.394923	0.290000	0.230000	CP2
+57	0.326906	-0.809121	0.290000	0.230000	P2
+58	0.659023	-0.813825	0.290000	0.230000	P4
+59	0.987913	-0.858779	0.290000	0.230000	P6
+60	1.299041	-0.943808	0.290000	0.230000	P8
+61	1.537550	-1.290157	0.290000	0.230000	P10
+62	0.943808	-1.299041	0.290000	0.230000	PO8
+63	0.545830	-1.170536	0.290000	0.230000	PO4
+64	0.496189	-1.527114	0.290000	0.230000	O2
\ No newline at end of file
diff --git a/mne/layouts/magnesWH3600.lout b/mne/channels/data/layouts/magnesWH3600.lout
similarity index 100%
rename from mne/layouts/magnesWH3600.lout
rename to mne/channels/data/layouts/magnesWH3600.lout
diff --git a/mne/channels/data/montages/10-5-System_Mastoids_EGI129.csd b/mne/channels/data/montages/10-5-System_Mastoids_EGI129.csd
new file mode 100755
index 0000000..3db91f3
--- /dev/null
+++ b/mne/channels/data/montages/10-5-System_Mastoids_EGI129.csd
@@ -0,0 +1,467 @@
+// MatLab   Sphere coordinates [degrees]         Cartesian coordinates
+// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface
+    Nose      90.000   -33.750     1.000    0.0000    0.8315   -0.5556     -0.00000000000000005
+      Nz      90.000   -22.500     1.000    0.0000    0.9239   -0.3827     -0.00000000000000002
+    NFpz      90.000   -11.250     1.000    0.0000    0.9808   -0.1951     -0.00000000000000004
+     Fpz      90.000     0.000     1.000    0.0000    1.0000    0.0000      0.00000000000000000
+    AFpz      90.000    11.250     1.000    0.0000    0.9808    0.1951     -0.00000000000000004
+     AFz      90.000    22.500     1.000    0.0000    0.9239    0.3827     -0.00000000000000002
+    AFFz      90.000    33.750     1.000    0.0000    0.8315    0.5556     -0.00000000000000005
+      Fz      90.000    45.000     1.000    0.0000    0.7071    0.7071     -0.00000000000000002
+    FFCz      90.000    56.250     1.000    0.0000    0.5556    0.8315      0.00000000000000007
+     FCz      90.000    67.500     1.000    0.0000    0.3827    0.9239      0.00000000000000002
+    FCCz      90.000    78.750     1.000    0.0000    0.1951    0.9808     -0.00000000000000001
+      Cz       0.000    90.000     1.000    0.0000    0.0000    1.0000      0.00000000000000000
+    CCPz     -90.000    78.750     1.000    0.0000   -0.1951    0.9808     -0.00000000000000001
+     CPz     -90.000    67.500     1.000    0.0000   -0.3827    0.9239      0.00000000000000002
+    CPPz     -90.000    56.250     1.000    0.0000   -0.5556    0.8315      0.00000000000000007
+      Pz     -90.000    45.000     1.000    0.0000   -0.7071    0.7071     -0.00000000000000002
+    PPOz     -90.000    33.750     1.000    0.0000   -0.8315    0.5556     -0.00000000000000005
+     POz     -90.000    22.500     1.000    0.0000   -0.9239    0.3827     -0.00000000000000002
+    POOz     -90.000    11.250     1.000    0.0000   -0.9808    0.1951     -0.00000000000000004
+      Oz     -90.000     0.000     1.000    0.0000   -1.0000    0.0000      0.00000000000000000
+     OIz     -90.000   -11.250     1.000    0.0000   -0.9808   -0.1951     -0.00000000000000004
+      Iz     -90.000   -22.500     1.000    0.0000   -0.9239   -0.3827     -0.00000000000000002
+     N1h      99.000   -22.500     1.000   -0.1445    0.9125   -0.3827      0.00000000000000011
+      N1     108.000   -22.500     1.000   -0.2855    0.8787   -0.3827     -0.00000000000000004
+    AFp9     117.000   -22.500     1.000   -0.4194    0.8232   -0.3827      0.00000000000000003
+     AF9     126.000   -22.500     1.000   -0.5430    0.7474   -0.3827      0.00000000000000001
+    AFF9     135.000   -22.500     1.000   -0.6533    0.6533   -0.3827     -0.00000000000000007
+      F9     144.000   -22.500     1.000   -0.7474    0.5430   -0.3827     -0.00000000000000003
+    FFT9     153.000   -22.500     1.000   -0.8232    0.4194   -0.3827     -0.00000000000000001
+     FT9     162.000   -22.500     1.000   -0.8787    0.2855   -0.3827      0.00000000000000006
+    FTT9     171.000   -22.500     1.000   -0.9125    0.1445   -0.3827     -0.00000000000000004
+      T9     180.000   -22.500     1.000   -0.9239    0.0000   -0.3827     -0.00000000000000002
+    TTP9    -171.000   -22.500     1.000   -0.9125   -0.1445   -0.3827     -0.00000000000000004
+     TP9    -162.000   -22.500     1.000   -0.8787   -0.2855   -0.3827      0.00000000000000006
+    TPP9    -153.000   -22.500     1.000   -0.8232   -0.4194   -0.3827     -0.00000000000000001
+      P9    -144.000   -22.500     1.000   -0.7474   -0.5430   -0.3827     -0.00000000000000003
+    PPO9    -135.000   -22.500     1.000   -0.6533   -0.6533   -0.3827     -0.00000000000000007
+     PO9    -126.000   -22.500     1.000   -0.5430   -0.7474   -0.3827      0.00000000000000001
+    POO9    -117.000   -22.500     1.000   -0.4194   -0.8232   -0.3827      0.00000000000000003
+      I1    -108.000   -22.500     1.000   -0.2855   -0.8787   -0.3827     -0.00000000000000004
+     I1h     -99.000   -22.500     1.000   -0.1445   -0.9125   -0.3827      0.00000000000000011
+   NFp1h      99.000   -11.250     1.000   -0.1534    0.9687   -0.1951     -0.00000000000000004
+    NFp1     108.000   -11.250     1.000   -0.3031    0.9328   -0.1951     -0.00000000000000009
+   AFp9h     117.000   -11.250     1.000   -0.4453    0.8739   -0.1951     -0.00000000000000008
+    AF9h     126.000   -11.250     1.000   -0.5765    0.7935   -0.1951     -0.00000000000000010
+   AFF9h     135.000   -11.250     1.000   -0.6935    0.6935   -0.1951     -0.00000000000000007
+     F9h     144.000   -11.250     1.000   -0.7935    0.5765   -0.1951      0.00000000000000003
+   FFT9h     153.000   -11.250     1.000   -0.8739    0.4453   -0.1951      0.00000000000000007
+    FT9h     162.000   -11.250     1.000   -0.9328    0.3031   -0.1951      0.00000000000000001
+   FTT9h     171.000   -11.250     1.000   -0.9687    0.1534   -0.1951      0.00000000000000001
+     T9h     180.000   -11.250     1.000   -0.9808    0.0000   -0.1951     -0.00000000000000004
+   TTP9h    -171.000   -11.250     1.000   -0.9687   -0.1534   -0.1951      0.00000000000000001
+    TP9h    -162.000   -11.250     1.000   -0.9328   -0.3031   -0.1951      0.00000000000000001
+   TPP9h    -153.000   -11.250     1.000   -0.8739   -0.4453   -0.1951      0.00000000000000007
+     P9h    -144.000   -11.250     1.000   -0.7935   -0.5765   -0.1951      0.00000000000000003
+   PPO9h    -135.000   -11.250     1.000   -0.6935   -0.6935   -0.1951     -0.00000000000000007
+    PO9h    -126.000   -11.250     1.000   -0.5765   -0.7935   -0.1951     -0.00000000000000010
+   POO9h    -117.000   -11.250     1.000   -0.4453   -0.8739   -0.1951     -0.00000000000000008
+     OI1    -108.000   -11.250     1.000   -0.3031   -0.9328   -0.1951     -0.00000000000000009
+    OI1h     -99.000   -11.250     1.000   -0.1534   -0.9687   -0.1951     -0.00000000000000004
+    Fp1h      99.000     0.000     1.000   -0.1564    0.9877    0.0000      0.00000000000000007
+     Fp1     108.000     0.000     1.000   -0.3090    0.9511    0.0000      0.00000000000000008
+    AFp7     117.000     0.000     1.000   -0.4540    0.8910    0.0000     -0.00000000000000002
+     AF7     126.000     0.000     1.000   -0.5878    0.8090    0.0000     -0.00000000000000008
+    AFF7     135.000     0.000     1.000   -0.7071    0.7071    0.0000     -0.00000000000000002
+      F7     144.000     0.000     1.000   -0.8090    0.5878    0.0000      0.00000000000000000
+    FFT7     153.000     0.000     1.000   -0.8910    0.4540    0.0000     -0.00000000000000007
+     FT7     162.000     0.000     1.000   -0.9511    0.3090    0.0000     -0.00000000000000003
+    FTT7     171.000     0.000     1.000   -0.9877    0.1564    0.0000     -0.00000000000000010
+      T7     180.000     0.000     1.000   -1.0000    0.0000    0.0000      0.00000000000000000
+    TTP7    -171.000     0.000     1.000   -0.9877   -0.1564    0.0000     -0.00000000000000010
+     TP7    -162.000     0.000     1.000   -0.9511   -0.3090    0.0000     -0.00000000000000003
+    TPP7    -153.000     0.000     1.000   -0.8910   -0.4540    0.0000     -0.00000000000000007
+      P7    -144.000     0.000     1.000   -0.8090   -0.5878    0.0000      0.00000000000000000
+    PPO7    -135.000     0.000     1.000   -0.7071   -0.7071    0.0000     -0.00000000000000002
+     PO7    -126.000     0.000     1.000   -0.5878   -0.8090    0.0000     -0.00000000000000008
+    POO7    -117.000     0.000     1.000   -0.4540   -0.8910    0.0000     -0.00000000000000002
+      O1    -108.000     0.000     1.000   -0.3090   -0.9511    0.0000      0.00000000000000008
+     O1h     -99.000     0.000     1.000   -0.1564   -0.9877    0.0000      0.00000000000000007
+     N2h      81.000   -22.500     1.000    0.1445    0.9125   -0.3827     -0.00000000000000006
+      N2      72.000   -22.500     1.000    0.2855    0.8787   -0.3827      0.00000000000000003
+   AFp10      63.000   -22.500     1.000    0.4194    0.8232   -0.3827     -0.00000000000000005
+    AF10      54.000   -22.500     1.000    0.5430    0.7474   -0.3827     -0.00000000000000003
+   AFF10      45.000   -22.500     1.000    0.6533    0.6533   -0.3827      0.00000000000000007
+     F10      36.000   -22.500     1.000    0.7474    0.5430   -0.3827      0.00000000000000013
+   FFT10      27.000   -22.500     1.000    0.8232    0.4194   -0.3827      0.00000000000000008
+    FT10      18.000   -22.500     1.000    0.8787    0.2855   -0.3827     -0.00000000000000000
+   FTT10       9.000   -22.500     1.000    0.9125    0.1445   -0.3827     -0.00000000000000008
+     T10       0.000   -22.500     1.000    0.9239    0.0000   -0.3827     -0.00000000000000002
+   TTP10      -9.000   -22.500     1.000    0.9125   -0.1445   -0.3827     -0.00000000000000008
+    TP10     -18.000   -22.500     1.000    0.8787   -0.2855   -0.3827     -0.00000000000000000
+   TPP10     -27.000   -22.500     1.000    0.8232   -0.4194   -0.3827      0.00000000000000008
+     P10     -36.000   -22.500     1.000    0.7474   -0.5430   -0.3827      0.00000000000000013
+   PPO10     -45.000   -22.500     1.000    0.6533   -0.6533   -0.3827      0.00000000000000007
+    PO10     -54.000   -22.500     1.000    0.5430   -0.7474   -0.3827     -0.00000000000000003
+   POO10     -63.000   -22.500     1.000    0.4194   -0.8232   -0.3827     -0.00000000000000005
+      I2     -72.000   -22.500     1.000    0.2855   -0.8787   -0.3827      0.00000000000000003
+     I2h     -81.000   -22.500     1.000    0.1445   -0.9125   -0.3827     -0.00000000000000006
+   NFp2h      81.000   -11.250     1.000    0.1534    0.9687   -0.1951     -0.00000000000000000
+    NFp2      72.000   -11.250     1.000    0.3031    0.9328   -0.1951     -0.00000000000000002
+  AFp10h      63.000   -11.250     1.000    0.4453    0.8739   -0.1951      0.00000000000000002
+   AF10h      54.000   -11.250     1.000    0.5765    0.7935   -0.1951      0.00000000000000003
+  AFF10h      45.000   -11.250     1.000    0.6935    0.6935   -0.1951      0.00000000000000008
+    F10h      36.000   -11.250     1.000    0.7935    0.5765   -0.1951      0.00000000000000003
+  FFT10h      27.000   -11.250     1.000    0.8739    0.4453   -0.1951     -0.00000000000000003
+   FT10h      18.000   -11.250     1.000    0.9328    0.3031   -0.1951     -0.00000000000000005
+  FTT10h       9.000   -11.250     1.000    0.9687    0.1534   -0.1951     -0.00000000000000002
+    T10h       0.000   -11.250     1.000    0.9808    0.0000   -0.1951     -0.00000000000000004
+  TTP10h      -9.000   -11.250     1.000    0.9687   -0.1534   -0.1951     -0.00000000000000002
+   TP10h     -18.000   -11.250     1.000    0.9328   -0.3031   -0.1951     -0.00000000000000005
+  TPP10h     -27.000   -11.250     1.000    0.8739   -0.4453   -0.1951     -0.00000000000000003
+    P10h     -36.000   -11.250     1.000    0.7935   -0.5765   -0.1951      0.00000000000000003
+  PPO10h     -45.000   -11.250     1.000    0.6935   -0.6935   -0.1951      0.00000000000000008
+   PO10h     -54.000   -11.250     1.000    0.5765   -0.7935   -0.1951      0.00000000000000003
+  POO10h     -63.000   -11.250     1.000    0.4453   -0.8739   -0.1951      0.00000000000000002
+     OI2     -72.000   -11.250     1.000    0.3031   -0.9328   -0.1951     -0.00000000000000002
+    OI2h     -81.000   -11.250     1.000    0.1534   -0.9687   -0.1951     -0.00000000000000000
+    Fp2h      81.000     0.000     1.000    0.1564    0.9877    0.0000      0.00000000000000010
+     Fp2      72.000     0.000     1.000    0.3090    0.9511    0.0000     -0.00000000000000006
+    AFp8      63.000     0.000     1.000    0.4540    0.8910    0.0000     -0.00000000000000012
+     AF8      54.000     0.000     1.000    0.5878    0.8090    0.0000      0.00000000000000005
+    AFF8      45.000     0.000     1.000    0.7071    0.7071    0.0000     -0.00000000000000002
+      F8      36.000     0.000     1.000    0.8090    0.5878    0.0000      0.00000000000000005
+    FFT8      27.000     0.000     1.000    0.8910    0.4540    0.0000      0.00000000000000003
+     FT8      18.000     0.000     1.000    0.9511    0.3090    0.0000     -0.00000000000000010
+    FTT8       9.000     0.000     1.000    0.9877    0.1564    0.0000      0.00000000000000009
+      T8       0.000     0.000     1.000    1.0000    0.0000    0.0000      0.00000000000000000
+    TTP8      -9.000     0.000     1.000    0.9877   -0.1564    0.0000      0.00000000000000009
+     TP8     -18.000     0.000     1.000    0.9511   -0.3090    0.0000     -0.00000000000000010
+    TPP8     -27.000     0.000     1.000    0.8910   -0.4540    0.0000      0.00000000000000003
+      P8     -36.000     0.000     1.000    0.8090   -0.5878    0.0000      0.00000000000000005
+    PPO8     -45.000     0.000     1.000    0.7071   -0.7071    0.0000     -0.00000000000000002
+     PO8     -54.000     0.000     1.000    0.5878   -0.8090    0.0000      0.00000000000000005
+    POO8     -63.000     0.000     1.000    0.4540   -0.8910    0.0000     -0.00000000000000012
+      O2     -72.000     0.000     1.000    0.3090   -0.9511    0.0000     -0.00000000000000006
+     O2h     -81.000     0.000     1.000    0.1564   -0.9877    0.0000      0.00000000000000010
+     T7h     180.000    11.250     1.000   -0.9808    0.0000    0.1951     -0.00000000000000004
+      C5     180.000    22.500     1.000   -0.9239    0.0000    0.3827     -0.00000000000000002
+     C5h     180.000    33.750     1.000   -0.8315    0.0000    0.5556     -0.00000000000000005
+      C3     180.000    45.000     1.000   -0.7071    0.0000    0.7071     -0.00000000000000002
+     C3h     180.000    56.250     1.000   -0.5556    0.0000    0.8315      0.00000000000000007
+      C1     180.000    67.500     1.000   -0.3827    0.0000    0.9239      0.00000000000000002
+     C1h     180.000    78.750     1.000   -0.1951    0.0000    0.9808     -0.00000000000000001
+     T8h       0.000    11.250     1.000    0.9808    0.0000    0.1951     -0.00000000000000004
+      C6       0.000    22.500     1.000    0.9239    0.0000    0.3827     -0.00000000000000002
+     C6h       0.000    33.750     1.000    0.8315    0.0000    0.5556     -0.00000000000000005
+      C4       0.000    45.000     1.000    0.7071    0.0000    0.7071     -0.00000000000000002
+     C4h       0.000    56.250     1.000    0.5556    0.0000    0.8315      0.00000000000000007
+      C2       0.000    67.500     1.000    0.3827    0.0000    0.9239      0.00000000000000002
+     C2h       0.000    78.750     1.000    0.1951    0.0000    0.9808     -0.00000000000000001
+      F3     129.254    29.833     1.000   -0.5489    0.6717    0.4975      0.00000000000000004
+      F4      50.746    29.833     1.000    0.5489    0.6717    0.4975      0.00000000000000004
+      P3    -129.254    29.833     1.000   -0.5489   -0.6717    0.4975      0.00000000000000004
+      P4     -50.746    29.833     1.000    0.5489   -0.6717    0.4975      0.00000000000000004
+      F5     138.891    15.619     1.000   -0.7256    0.6332    0.2692      0.00000000000000002
+      F6      41.109    15.619     1.000    0.7256    0.6332    0.2692      0.00000000000000002
+      P5    -138.891    15.619     1.000   -0.7256   -0.6332    0.2692      0.00000000000000002
+      P6     -41.109    15.619     1.000    0.7256   -0.6332    0.2692      0.00000000000000002
+      F1     112.953    40.722     1.000   -0.2956    0.6979    0.6524     -0.00000000000000007
+      F2      67.047    40.722     1.000    0.2956    0.6979    0.6524     -0.00000000000000011
+      P1    -112.953    40.722     1.000   -0.2956   -0.6979    0.6524     -0.00000000000000013
+      P2     -67.047    40.722     1.000    0.2956   -0.6979    0.6524     -0.00000000000000011
+     F7h     141.913     7.907     1.000   -0.7796    0.6110    0.1376      0.00000000000000007
+     F8h      38.087     7.907     1.000    0.7796    0.6110    0.1376      0.00000000000000007
+     P7h    -141.913     7.907     1.000   -0.7796   -0.6110    0.1376     -0.00000000000000004
+     P8h     -38.087     7.907     1.000    0.7796   -0.6110    0.1376      0.00000000000000007
+     F5h     134.752    22.998     1.000   -0.6481    0.6537    0.3907      0.00000000000000009
+     F6h      45.248    22.998     1.000    0.6481    0.6537    0.3907     -0.00000000000000005
+     P5h    -134.752    22.998     1.000   -0.6481   -0.6537    0.3907      0.00000000000000009
+     P6h     -45.248    22.998     1.000    0.6481   -0.6537    0.3907     -0.00000000000000005
+     F3h     122.046    35.889     1.000   -0.4299    0.6867    0.5862      0.00000000000000003
+     F4h      57.954    35.889     1.000    0.4299    0.6867    0.5862      0.00000000000000008
+     P3h    -122.046    35.889     1.000   -0.4299   -0.6867    0.5862      0.00000000000000012
+     P4h     -57.954    35.889     1.000    0.4299   -0.6867    0.5862      0.00000000000000008
+     F1h     102.055    43.890     1.000   -0.1505    0.7048    0.6933      0.00000000000000010
+     F2h      77.945    43.890     1.000    0.1505    0.7048    0.6933      0.00000000000000004
+     P1h    -102.055    43.890     1.000   -0.1505   -0.7048    0.6933      0.00000000000000006
+     P2h     -77.945    43.890     1.000    0.1505   -0.7048    0.6933      0.00000000000000004
+     FC3     151.481    40.847     1.000   -0.6647    0.3612    0.6540      0.00000000000000005
+     FC4      28.519    40.847     1.000    0.6647    0.3612    0.6540     -0.00000000000000003
+     CP3    -151.481    40.847     1.000   -0.6647   -0.3612    0.6540     -0.00000000000000000
+     CP4     -28.519    40.847     1.000    0.6647   -0.3612    0.6540     -0.00000000000000003
+     FC5     158.854    20.773     1.000   -0.8720    0.3373    0.3547     -0.00000000000000003
+     FC6      21.146    20.773     1.000    0.8720    0.3373    0.3547     -0.00000000000000008
+     CP5    -158.854    20.773     1.000   -0.8720   -0.3373    0.3547      0.00000000000000011
+     CP6     -21.146    20.773     1.000    0.8720   -0.3373    0.3547     -0.00000000000000008
+     FC1     133.587    58.627     1.000   -0.3589    0.3771    0.8538     -0.00000000000000003
+     FC2      46.413    58.627     1.000    0.3589    0.3771    0.8538     -0.00000000000000003
+     CP1    -133.587    58.627     1.000   -0.3589   -0.3771    0.8538     -0.00000000000000003
+     CP2     -46.413    58.627     1.000    0.3589   -0.3771    0.8538     -0.00000000000000003
+    FT7h     160.798    10.433     1.000   -0.9288    0.3235    0.1811     -0.00000000000000008
+    FT8h      19.202    10.433     1.000    0.9288    0.3235    0.1811     -0.00000000000000002
+    TP7h    -160.798    10.433     1.000   -0.9288   -0.3235    0.1811     -0.00000000000000008
+    TP8h     -19.202    10.433     1.000    0.9288   -0.3235    0.1811     -0.00000000000000002
+    FC5h     155.912    30.952     1.000   -0.7829    0.3500    0.5143      0.00000000000000003
+    FC6h      24.088    30.952     1.000    0.7829    0.3500    0.5143      0.00000000000000003
+    CP5h    -155.912    30.952     1.000   -0.7829   -0.3500    0.5143      0.00000000000000003
+    CP6h     -24.088    30.952     1.000    0.7829   -0.3500    0.5143      0.00000000000000003
+    FC3h     144.625    50.235     1.000   -0.5215    0.3703    0.7687      0.00000000000000004
+    FC4h      35.375    50.235     1.000    0.5215    0.3703    0.7687     -0.00000000000000000
+    CP3h    -144.625    50.235     1.000   -0.5215   -0.3703    0.7687     -0.00000000000000005
+    CP4h     -35.375    50.235     1.000    0.5215   -0.3703    0.7687     -0.00000000000000000
+    FC1h     115.626    64.984     1.000   -0.1829    0.3813    0.9062      0.00000000000000006
+    FC2h      64.374    64.984     1.000    0.1829    0.3813    0.9062      0.00000000000000008
+    CP1h    -115.626    64.984     1.000   -0.1829   -0.3813    0.9062      0.00000000000000006
+    CP2h     -64.374    64.984     1.000    0.1829   -0.3813    0.9062      0.00000000000000008
+     AF3     113.312    15.040     1.000   -0.3822    0.8869    0.2595     -0.00000000000000007
+     AF4      66.688    15.040     1.000    0.3822    0.8869    0.2595      0.00000000000000002
+     PO3    -113.312    15.040     1.000   -0.3822   -0.8869    0.2595     -0.00000000000000007
+     PO4     -66.688    15.040     1.000    0.3822   -0.8869    0.2595      0.00000000000000002
+     AF5     120.854     7.908     1.000   -0.5080    0.8503    0.1376      0.00000000000000011
+     AF6      59.146     7.908     1.000    0.5080    0.8503    0.1376      0.00000000000000003
+     PO5    -120.854     7.908     1.000   -0.5080   -0.8503    0.1376      0.00000000000000007
+     PO6     -59.146     7.908     1.000    0.5080   -0.8503    0.1376      0.00000000000000003
+     AF1     102.721    20.458     1.000   -0.2063    0.9139    0.3495      0.00000000000000005
+     AF2      77.279    20.458     1.000    0.2063    0.9139    0.3495      0.00000000000000009
+     PO1    -102.721    20.458     1.000   -0.2063   -0.9139    0.3495      0.00000000000000002
+     PO2     -77.279    20.458     1.000    0.2063   -0.9139    0.3495      0.00000000000000009
+    AF7h     123.694     4.005     1.000   -0.5534    0.8300    0.0698      0.00000000000000004
+    AF8h      56.306     4.005     1.000    0.5534    0.8300    0.0698     -0.00000000000000008
+    PO7h    -123.694     4.005     1.000   -0.5534   -0.8300    0.0698      0.00000000000000004
+    PO8h     -56.306     4.005     1.000    0.5534   -0.8300    0.0698     -0.00000000000000008
+    AF5h     117.408    11.630     1.000   -0.4509    0.8695    0.2016      0.00000000000000005
+    AF6h      62.592    11.630     1.000    0.4509    0.8695    0.2016     -0.00000000000000004
+    PO5h    -117.408    11.630     1.000   -0.4509   -0.8695    0.2016      0.00000000000000009
+    PO6h     -62.592    11.630     1.000    0.4509   -0.8695    0.2016     -0.00000000000000004
+    AF3h     108.359    18.087     1.000   -0.2994    0.9022    0.3105      0.00000000000000006
+    AF4h      71.641    18.087     1.000    0.2994    0.9022    0.3105     -0.00000000000000000
+    PO3h    -108.359    18.087     1.000   -0.2994   -0.9022    0.3105      0.00000000000000010
+    PO4h     -71.641    18.087     1.000    0.2994   -0.9022    0.3105     -0.00000000000000000
+    AF1h      96.517    21.977     1.000   -0.1052    0.9213    0.3742     -0.00000000000000001
+    AF2h      83.483    21.977     1.000    0.1052    0.9213    0.3742      0.00000000000000001
+    PO1h     -96.517    21.977     1.000   -0.1052   -0.9213    0.3742      0.00000000000000003
+    PO2h     -83.483    21.977     1.000    0.1052   -0.9213    0.3742      0.00000000000000001
+    AFp3     106.794     7.311     1.000   -0.2866    0.9496    0.1273     -0.00000000000000001
+    AFp4      73.206     7.311     1.000    0.2866    0.9496    0.1273     -0.00000000000000007
+    POO3    -106.794     7.311     1.000   -0.2866   -0.9496    0.1273     -0.00000000000000009
+    POO4     -73.206     7.311     1.000    0.2866   -0.9496    0.1273     -0.00000000000000007
+    AFp5     112.551     3.800     1.000   -0.3827    0.9215    0.0663      0.00000000000000000
+    AFp6      67.449     3.800     1.000    0.3827    0.9215    0.0663     -0.00000000000000008
+    POO5    -112.551     3.800     1.000   -0.3827   -0.9215    0.0663     -0.00000000000000008
+    POO6     -67.449     3.800     1.000    0.3827   -0.9215    0.0663     -0.00000000000000008
+    AFp1      99.082    10.141     1.000   -0.1554    0.9720    0.1761      0.00000000000000002
+    AFp2      80.918    10.141     1.000    0.1554    0.9720    0.1761      0.00000000000000006
+    POO1     -99.082    10.141     1.000   -0.1554   -0.9720    0.1761      0.00000000000000002
+    POO2     -80.918    10.141     1.000    0.1554   -0.9720    0.1761      0.00000000000000006
+    AFF3     120.670    22.700     1.000   -0.4706    0.7935    0.3859      0.00000000000000003
+    AFF4      59.330    22.700     1.000    0.4706    0.7935    0.3859     -0.00000000000000005
+    PPO3    -120.670    22.700     1.000   -0.4706   -0.7935    0.3859      0.00000000000000003
+    PPO4     -59.330    22.700     1.000    0.4706   -0.7935    0.3859     -0.00000000000000005
+    AFF5     129.623    11.966     1.000   -0.6239    0.7535    0.2073     -0.00000000000000003
+    AFF6      50.377    11.966     1.000    0.6239    0.7535    0.2073      0.00000000000000008
+    PPO5    -129.623    11.966     1.000   -0.6239   -0.7535    0.2073     -0.00000000000000003
+    PPO6     -50.377    11.966     1.000    0.6239   -0.7535    0.2073      0.00000000000000008
+    AFF1     107.147    30.720     1.000   -0.2535    0.8215    0.5108     -0.00000000000000009
+    AFF2      72.853    30.720     1.000    0.2535    0.8215    0.5108     -0.00000000000000003
+    PPO1    -107.147    30.720     1.000   -0.2535   -0.8215    0.5108     -0.00000000000000009
+    PPO2     -72.853    30.720     1.000    0.2535   -0.8215    0.5108     -0.00000000000000003
+   AFF7h     132.702     6.068     1.000   -0.6744    0.7308    0.1057      0.00000000000000006
+   AFF8h      47.298     6.068     1.000    0.6744    0.7308    0.1057     -0.00000000000000004
+   PPO7h    -132.702     6.068     1.000   -0.6744   -0.7308    0.1057     -0.00000000000000003
+   PPO8h     -47.298     6.068     1.000    0.6744   -0.7308    0.1057     -0.00000000000000004
+   AFF5h     125.648    17.573     1.000   -0.5556    0.7747    0.3019     -0.00000000000000004
+   AFF6h      54.352    17.573     1.000    0.5556    0.7747    0.3019      0.00000000000000001
+   PPO5h    -125.648    17.573     1.000   -0.5556   -0.7747    0.3019     -0.00000000000000001
+   PPO6h     -54.352    17.573     1.000    0.5556   -0.7747    0.3019      0.00000000000000001
+   AFF3h     114.474    27.207     1.000   -0.3684    0.8095    0.4572      0.00000000000000001
+   AFF4h      65.526    27.207     1.000    0.3684    0.8095    0.4572      0.00000000000000009
+   PPO3h    -114.474    27.207     1.000   -0.3684   -0.8095    0.4572      0.00000000000000001
+   PPO4h     -65.526    27.207     1.000    0.3684   -0.8095    0.4572      0.00000000000000009
+   AFF1h      98.854    32.973     1.000   -0.1291    0.8289    0.5442      0.00000000000000004
+   AFF2h      81.146    32.973     1.000    0.1291    0.8289    0.5442      0.00000000000000007
+   PPO1h     -98.854    32.973     1.000   -0.1291   -0.8289    0.5442     -0.00000000000000010
+   PPO2h     -81.146    32.973     1.000    0.1291   -0.8289    0.5442      0.00000000000000007
+    FFC3     139.449    36.019     1.000   -0.6146    0.5258    0.5881      0.00000000000000004
+    FFC4      40.551    36.019     1.000    0.6146    0.5258    0.5881      0.00000000000000006
+    CPP3    -139.449    36.019     1.000   -0.6146   -0.5258    0.5881     -0.00000000000000004
+    CPP4     -40.551    36.019     1.000    0.6146   -0.5258    0.5881      0.00000000000000006
+    FFC5     148.658    18.605     1.000   -0.8094    0.4930    0.3190      0.00000000000000011
+    FFC6      31.342    18.605     1.000    0.8094    0.4930    0.3190      0.00000000000000011
+    CPP5    -148.658    18.605     1.000   -0.8094   -0.4930    0.3190      0.00000000000000011
+    CPP6     -31.342    18.605     1.000    0.8094   -0.4930    0.3190      0.00000000000000011
+    FFC1     121.162    50.192     1.000   -0.3313    0.5478    0.7682      0.00000000000000005
+    FFC2      58.838    50.192     1.000    0.3313    0.5478    0.7682      0.00000000000000001
+    CPP1    -121.162    50.192     1.000   -0.3313   -0.5478    0.7682      0.00000000000000005
+    CPP2     -58.838    50.192     1.000    0.3313   -0.5478    0.7682      0.00000000000000001
+   FFT7h     151.293     9.383     1.000   -0.8653    0.4739    0.1630      0.00000000000000001
+   FFT8h      28.707     9.383     1.000    0.8653    0.4739    0.1630     -0.00000000000000004
+   TPP7h    -151.293     9.383     1.000   -0.8653   -0.4739    0.1630      0.00000000000000003
+   TPP8h     -28.707     9.383     1.000    0.8653   -0.4739    0.1630     -0.00000000000000004
+   FFC5h     144.847    27.547     1.000   -0.7249    0.5105    0.4625      0.00000000000000010
+   FFC6h      35.153    27.547     1.000    0.7249    0.5105    0.4625      0.00000000000000005
+   CPP5h    -144.847    27.547     1.000   -0.7249   -0.5105    0.4625      0.00000000000000010
+   CPP6h     -35.153    27.547     1.000    0.7249   -0.5105    0.4625      0.00000000000000005
+   FFC3h     131.815    43.741     1.000   -0.4817    0.5385    0.6914     -0.00000000000000011
+   FFC4h      48.185    43.741     1.000    0.4817    0.5385    0.6914     -0.00000000000000006
+   CPP3h    -131.815    43.741     1.000   -0.4817   -0.5385    0.6914     -0.00000000000000002
+   CPP4h     -48.185    43.741     1.000    0.4817   -0.5385    0.6914     -0.00000000000000006
+   FFC1h     106.951    54.636     1.000   -0.1687    0.5536    0.8155      0.00000000000000003
+   FFC2h      73.049    54.636     1.000    0.1687    0.5536    0.8155      0.00000000000000004
+   CPP1h    -106.951    54.636     1.000   -0.1687   -0.5536    0.8155      0.00000000000000006
+   CPP2h     -73.049    54.636     1.000    0.1687   -0.5536    0.8155      0.00000000000000004
+    FCC3     165.214    43.935     1.000   -0.6963    0.1838    0.6938     -0.00000000000000008
+    FCC4      14.786    43.935     1.000    0.6963    0.1838    0.6938     -0.00000000000000001
+    CCP3    -165.214    43.935     1.000   -0.6963   -0.1838    0.6938     -0.00000000000000008
+    CCP4     -14.786    43.935     1.000    0.6963   -0.1838    0.6938     -0.00000000000000001
+    FCC5     169.351    22.070     1.000   -0.9108    0.1712    0.3757      0.00000000000000001
+    FCC6      10.649    22.070     1.000    0.9108    0.1712    0.3757      0.00000000000000004
+    CCP5    -169.351    22.070     1.000   -0.9108   -0.1712    0.3757     -0.00000000000000007
+    CCP6     -10.649    22.070     1.000    0.9108   -0.1712    0.3757      0.00000000000000004
+    FCC1     152.968    64.990     1.000   -0.3766    0.1922    0.9062      0.00000000000000002
+    FCC2      27.032    64.990     1.000    0.3766    0.1922    0.9062      0.00000000000000000
+    CCP1    -152.968    64.990     1.000   -0.3766   -0.1922    0.9062      0.00000000000000002
+    CCP2     -27.032    64.990     1.000    0.3766   -0.1922    0.9062      0.00000000000000000
+   FTT7h     170.382    11.048     1.000   -0.9677    0.1640    0.1916      0.00000000000000003
+   FTT8h       9.618    11.048     1.000    0.9677    0.1640    0.1916      0.00000000000000004
+   TTP7h    -170.382    11.048     1.000   -0.9677   -0.1640    0.1916      0.00000000000000003
+   TTP8h      -9.618    11.048     1.000    0.9677   -0.1640    0.1916      0.00000000000000004
+   FCC5h     167.745    33.045     1.000   -0.8191    0.1779    0.5453      0.00000000000000001
+   FCC6h      12.255    33.045     1.000    0.8191    0.1779    0.5453     -0.00000000000000002
+   CCP5h    -167.745    33.045     1.000   -0.8191   -0.1779    0.5453      0.00000000000000006
+   CCP6h     -12.255    33.045     1.000    0.8191   -0.1779    0.5453     -0.00000000000000002
+   FCC3h     160.973    54.657     1.000   -0.5469    0.1886    0.8157     -0.00000000000000004
+   FCC4h      19.027    54.657     1.000    0.5469    0.1886    0.8157     -0.00000000000000002
+   CCP3h    -160.973    54.657     1.000   -0.5469   -0.1886    0.8157      0.00000000000000002
+   CCP4h     -19.027    54.657     1.000    0.5469   -0.1886    0.8157     -0.00000000000000002
+   FCC1h     134.645    74.147     1.000   -0.1920    0.1943    0.9620     -0.00000000000000006
+   FCC2h      45.355    74.147     1.000    0.1920    0.1943    0.9620     -0.00000000000000005
+   CCP1h    -134.645    74.147     1.000   -0.1920   -0.1943    0.9620     -0.00000000000000006
+   CCP2h     -45.355    74.147     1.000    0.1920   -0.1943    0.9620     -0.00000000000000005
+      A1     180.000   -22.500     1.000   -0.9239    0.0000   -0.3827     -0.00000000000000002
+      A2       0.000   -22.500     1.000    0.9239    0.0000   -0.3827     -0.00000000000000002
+     LPA     180.000   -22.500     1.000   -0.9239    0.0000   -0.3827     -0.00000000000000002
+     RPA       0.000   -22.500     1.000    0.9239    0.0000   -0.3827     -0.00000000000000002
+      LM    -162.000   -22.500     1.000   -0.8787   -0.2855   -0.3827      0.00000000000000006
+      RM     -18.000   -22.500     1.000    0.8787   -0.2855   -0.3827     -0.00000000000000000
+       1      36.000   -22.000     1.000    0.7501    0.5450   -0.3746      0.00000000000000001
+       2      47.000    -6.000     1.000    0.6783    0.7273   -0.1045     -0.00000000000000009
+       3      56.000    10.000     1.000    0.5507    0.8164    0.1736     -0.00000000000000004
+       4      72.000    26.000     1.000    0.2777    0.8548    0.4384      0.00000000000000001
+       5      78.000    42.000     1.000    0.1545    0.7269    0.6691     -0.00000000000000002
+       6      90.000    58.000     1.000    0.0000    0.5299    0.8480     -0.00000000000000008
+       7     126.000    74.000     1.000   -0.1620    0.2230    0.9613      0.00000000000000006
+       8      54.000   -22.000     1.000    0.5450    0.7501   -0.3746      0.00000000000000001
+       9      64.000    -6.000     1.000    0.4360    0.8939   -0.1045     -0.00000000000000008
+      10      73.000    10.000     1.000    0.2879    0.9418    0.1736     -0.00000000000000001
+      11      90.000    26.000     1.000    0.0000    0.8988    0.4384      0.00000000000000007
+      12     102.000    42.000     1.000   -0.1545    0.7269    0.6691      0.00000000000000001
+      13     126.000    58.000     1.000   -0.3115    0.4287    0.8480     -0.00000000000000004
+      14      72.000   -22.000     1.000    0.2865    0.8818   -0.3746      0.00000000000000006
+      15      81.000    -6.000     1.000    0.1556    0.9823   -0.1045      0.00000000000000000
+      16      90.000    10.000     1.000    0.0000    0.9848    0.1736     -0.00000000000000008
+      17      90.000   -33.750     1.000    0.0000    0.8315   -0.5556     -0.00000000000000005
+      18      99.000    -6.000     1.000   -0.1556    0.9823   -0.1045     -0.00000000000000003
+      19     108.000    10.000     1.000   -0.3043    0.9366    0.1736      0.00000000000000003
+      20     108.000    26.000     1.000   -0.2777    0.8548    0.4384     -0.00000000000000005
+      21     126.000    42.000     1.000   -0.4368    0.6012    0.6691     -0.00000000000000001
+      22     108.000   -22.000     1.000   -0.2865    0.8818   -0.3746     -0.00000000000000000
+      23     116.000    -6.000     1.000   -0.4360    0.8939   -0.1045      0.00000000000000002
+      24     126.000    10.000     1.000   -0.5789    0.7967    0.1736     -0.00000000000000004
+      25     126.000    26.000     1.000   -0.5283    0.7271    0.4384     -0.00000000000000010
+      26     126.000   -22.000     1.000   -0.5450    0.7501   -0.3746      0.00000000000000001
+      27     133.000    -6.000     1.000   -0.6783    0.7273   -0.1045      0.00000000000000007
+      28     142.000    10.000     1.000   -0.7760    0.6063    0.1736      0.00000000000000000
+      29     144.000    26.000     1.000   -0.7271    0.5283    0.4384     -0.00000000000000002
+      30     150.000    42.000     1.000   -0.6436    0.3716    0.6691      0.00000000000000004
+      31     162.000    58.000     1.000   -0.5040    0.1638    0.8480     -0.00000000000000003
+      32    -162.000    74.000     1.000   -0.2621   -0.0852    0.9613      0.00000000000000007
+      33     144.000   -22.000     1.000   -0.7501    0.5450   -0.3746     -0.00000000000000004
+      34     150.000    -6.000     1.000   -0.8613    0.4973   -0.1045      0.00000000000000007
+      35     159.000    10.000     1.000   -0.9194    0.3529    0.1736      0.00000000000000004
+      36     162.000    26.000     1.000   -0.8548    0.2777    0.4384      0.00000000000000004
+      37     174.000    42.000     1.000   -0.7391    0.0777    0.6691      0.00000000000000005
+      38    -162.000    58.000     1.000   -0.5040   -0.1638    0.8480     -0.00000000000000003
+      39     162.000   -22.000     1.000   -0.8818    0.2865   -0.3746      0.00000000000000006
+      40     167.000    -6.000     1.000   -0.9690    0.2237   -0.1045      0.00000000000000008
+      41     176.000    10.000     1.000   -0.9824    0.0687    0.1736      0.00000000000000009
+      42     180.000    26.000     1.000   -0.8988    0.0000    0.4384      0.00000000000000007
+      43    -162.000    42.000     1.000   -0.7068   -0.2296    0.6691     -0.00000000000000003
+      44     150.000   -38.000     1.000   -0.6824    0.3940   -0.6157     -0.00000000000000003
+      45     180.000   -22.000     1.000   -0.9272    0.0000   -0.3746      0.00000000000000003
+      46    -176.000    -6.000     1.000   -0.9921   -0.0694   -0.1045      0.00000000000000002
+      47    -167.000    10.000     1.000   -0.9596   -0.2215    0.1736      0.00000000000000009
+      48    -162.000    26.000     1.000   -0.8548   -0.2777    0.4384      0.00000000000000004
+      49     170.000   -38.000     1.000   -0.7760    0.1368   -0.6157      0.00000000000000005
+      50    -159.000    -6.000     1.000   -0.9285   -0.3564   -0.1045      0.00000000000000003
+      51    -150.000    10.000     1.000   -0.8529   -0.4924    0.1736      0.00000000000000009
+      52    -144.000    26.000     1.000   -0.7271   -0.5283    0.4384     -0.00000000000000002
+      53    -138.000    42.000     1.000   -0.5523   -0.4973    0.6691      0.00000000000000005
+      54    -126.000    58.000     1.000   -0.3115   -0.4287    0.8480     -0.00000000000000004
+      55     -90.000    74.000     1.000    0.0000   -0.2756    0.9613      0.00000000000000005
+      56    -170.000   -38.000     1.000   -0.7760   -0.1368   -0.6157      0.00000000000000005
+      57    -157.000   -22.000     1.000   -0.8535   -0.3623   -0.3746     -0.00000000000000002
+      58    -142.000    -6.000     1.000   -0.7837   -0.6123   -0.1045     -0.00000000000000006
+      59    -133.000    10.000     1.000   -0.6716   -0.7202    0.1736      0.00000000000000008
+      60    -126.000    26.000     1.000   -0.5283   -0.7271    0.4384     -0.00000000000000010
+      61    -114.000    42.000     1.000   -0.3023   -0.6789    0.6691      0.00000000000000006
+      62     -90.000    58.000     1.000    0.0000   -0.5299    0.8480     -0.00000000000000008
+      63    -150.000   -38.000     1.000   -0.6824   -0.3940   -0.6157     -0.00000000000000003
+      64    -139.000   -22.000     1.000   -0.6998   -0.6083   -0.3746      0.00000000000000003
+      65    -125.000    -6.000     1.000   -0.5704   -0.8147   -0.1045     -0.00000000000000002
+      66    -116.000    10.000     1.000   -0.4317   -0.8851    0.1736     -0.00000000000000004
+      67    -108.000    26.000     1.000   -0.2777   -0.8548    0.4384     -0.00000000000000005
+      68     -90.000    42.000     1.000    0.0000   -0.7431    0.6691      0.00000000000000005
+      69    -130.000   -38.000     1.000   -0.5065   -0.6037   -0.6157      0.00000000000000000
+      70    -122.000   -22.000     1.000   -0.4913   -0.7863   -0.3746      0.00000000000000010
+      71    -108.000    -6.000     1.000   -0.3073   -0.9458   -0.1045      0.00000000000000009
+      72     -99.000    10.000     1.000   -0.1541   -0.9727    0.1736     -0.00000000000000007
+      73     -90.000    26.000     1.000    0.0000   -0.8988    0.4384      0.00000000000000007
+      74    -110.000   -38.000     1.000   -0.2695   -0.7405   -0.6157      0.00000000000000002
+      75    -100.000   -22.000     1.000   -0.1610   -0.9131   -0.3746     -0.00000000000000009
+      76     -90.000    -6.000     1.000    0.0000   -0.9945   -0.1045     -0.00000000000000009
+      77     -81.000    10.000     1.000    0.1541   -0.9727    0.1736     -0.00000000000000004
+      78     -72.000    26.000     1.000    0.2777   -0.8548    0.4384      0.00000000000000001
+      79     -66.000    42.000     1.000    0.3023   -0.6789    0.6691      0.00000000000000003
+      80     -54.000    58.000     1.000    0.3115   -0.4287    0.8480     -0.00000000000000005
+      81     -18.000    74.000     1.000    0.2621   -0.0852    0.9613      0.00000000000000006
+      82     -90.000   -38.000     1.000    0.0000   -0.7880   -0.6157     -0.00000000000000007
+      83     -80.000   -22.000     1.000    0.1610   -0.9131   -0.3746     -0.00000000000000006
+      84     -72.000    -6.000     1.000    0.3073   -0.9458   -0.1045     -0.00000000000000005
+      85     -64.000    10.000     1.000    0.4317   -0.8851    0.1736      0.00000000000000006
+      86     -54.000    26.000     1.000    0.5283   -0.7271    0.4384      0.00000000000000002
+      87     -42.000    42.000     1.000    0.5523   -0.4973    0.6691      0.00000000000000004
+      88     -18.000    58.000     1.000    0.5040   -0.1638    0.8480     -0.00000000000000005
+      89     -70.000   -38.000     1.000    0.2695   -0.7405   -0.6157      0.00000000000000008
+      90     -59.000   -22.000     1.000    0.4775   -0.7948   -0.3746     -0.00000000000000006
+      91     -55.000    -6.000     1.000    0.5704   -0.8147   -0.1045      0.00000000000000004
+      92     -47.000    10.000     1.000    0.6716   -0.7202    0.1736     -0.00000000000000008
+      93     -36.000    26.000     1.000    0.7271   -0.5283    0.4384      0.00000000000000002
+      94     -18.000    42.000     1.000    0.7068   -0.2296    0.6691      0.00000000000000009
+      95     -50.000   -38.000     1.000    0.5065   -0.6037   -0.6157      0.00000000000000000
+      96     -41.000   -22.000     1.000    0.6998   -0.6083   -0.3746      0.00000000000000003
+      97     -38.000    -6.000     1.000    0.7837   -0.6123   -0.1045      0.00000000000000011
+      98     -30.000    10.000     1.000    0.8529   -0.4924    0.1736     -0.00000000000000005
+      99     -18.000    26.000     1.000    0.8548   -0.2777    0.4384     -0.00000000000000002
+     100     -30.000   -38.000     1.000    0.6824   -0.3940   -0.6157      0.00000000000000002
+     101     -23.000   -22.000     1.000    0.8535   -0.3623   -0.3746     -0.00000000000000002
+     102     -21.000    -6.000     1.000    0.9285   -0.3564   -0.1045      0.00000000000000007
+     103     -13.000    10.000     1.000    0.9596   -0.2215    0.1736      0.00000000000000001
+     104       0.000    26.000     1.000    0.8988    0.0000    0.4384      0.00000000000000007
+     105       6.000    42.000     1.000    0.7391    0.0777    0.6691      0.00000000000000007
+     106      18.000    58.000     1.000    0.5040    0.1638    0.8480     -0.00000000000000005
+     107      54.000    74.000     1.000    0.1620    0.2230    0.9613      0.00000000000000006
+     108     -10.000   -38.000     1.000    0.7760   -0.1368   -0.6157      0.00000000000000007
+     109      -4.000    -6.000     1.000    0.9921   -0.0694   -0.1045     -0.00000000000000001
+     110       4.000    10.000     1.000    0.9824    0.0687    0.1736      0.00000000000000006
+     111      18.000    26.000     1.000    0.8548    0.2777    0.4384     -0.00000000000000002
+     112      30.000    42.000     1.000    0.6436    0.3716    0.6691     -0.00000000000000006
+     113      54.000    58.000     1.000    0.3115    0.4287    0.8480     -0.00000000000000005
+     114      10.000   -38.000     1.000    0.7760    0.1368   -0.6157      0.00000000000000007
+     115       0.000   -22.000     1.000    0.9272    0.0000   -0.3746      0.00000000000000003
+     116      13.000    -6.000     1.000    0.9690    0.2237   -0.1045     -0.00000000000000001
+     117      21.000    10.000     1.000    0.9194    0.3529    0.1736      0.00000000000000004
+     118      36.000    26.000     1.000    0.7271    0.5283    0.4384      0.00000000000000002
+     119      54.000    42.000     1.000    0.4368    0.6012    0.6691      0.00000000000000004
+     120      30.000   -38.000     1.000    0.6824    0.3940   -0.6157      0.00000000000000002
+     121      18.000   -22.000     1.000    0.8818    0.2865   -0.3746      0.00000000000000003
+     122      30.000    -6.000     1.000    0.8613    0.4973   -0.1045     -0.00000000000000007
+     123      38.000    10.000     1.000    0.7760    0.6063    0.1736      0.00000000000000004
+     124      54.000    26.000     1.000    0.5283    0.7271    0.4384      0.00000000000000002
+     125      50.000   -38.000     1.000    0.5065    0.6037   -0.6157      0.00000000000000000
+     126      70.000   -38.000     1.000    0.2695    0.7405   -0.6157      0.00000000000000008
+     127     110.000   -38.000     1.000   -0.2695    0.7405   -0.6157      0.00000000000000002
+     128     130.000   -38.000     1.000   -0.5065    0.6037   -0.6157      0.00000000000000000
+     129       0.000    90.000     1.000    0.0000    0.0000    1.0000      0.00000000000000000
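(Editorial note: the montage tables in this hunk and the next store each electrode position twice — as MATLAB-convention spherical coordinates (theta and phi in degrees, plus a radius) and as the equivalent Cartesian unit vector, with the final column giving the tiny floating-point residual off the sphere surface. Below is a minimal Python sketch of that relationship, assuming the convention implied by the files' headers; the helper name sph_to_cart is illustrative only and not part of MNE.)

    import numpy as np

    def sph_to_cart(theta_deg, phi_deg, radius=1.0):
        # MATLAB convention: theta is the azimuth from +x toward +y,
        # phi is the elevation from the xy-plane, both in degrees.
        theta = np.deg2rad(theta_deg)
        phi = np.deg2rad(phi_deg)
        x = radius * np.cos(phi) * np.cos(theta)
        y = radius * np.cos(phi) * np.sin(theta)
        z = radius * np.sin(phi)
        return x, y, z

    # Example row from the table above: T10 at theta=0.000, phi=-22.500
    # should give (0.9239, 0.0000, -0.3827).
    x, y, z = sph_to_cart(0.0, -22.5)
    # The last column of each row is the residual off the unit sphere (~1e-16).
    print(round(x, 4), round(y, 4), round(z, 4),
          np.sqrt(x**2 + y**2 + z**2) - 1.0)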
diff --git a/mne/channels/data/montages/EGI_256.csd b/mne/channels/data/montages/EGI_256.csd
new file mode 100644
index 0000000..3da8637
--- /dev/null
+++ b/mne/channels/data/montages/EGI_256.csd
@@ -0,0 +1,258 @@
+// MatLab   Sphere coordinates [degrees]         Cartesian coordinates
+// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface
+      E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011
+      E2      44.600      -0.880       1.000    0.7119    0.7021   -0.0154   0.00000000000000000
+      E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000
+      E4      58.200      21.800       1.000    0.4893    0.7891    0.3714  -0.00000000000000011
+      E5      64.200      33.600       1.000    0.3625    0.7499    0.5534   0.00000000000000000
+      E6      70.800      45.500       1.000    0.2305    0.6619    0.7133  -0.00000000000000022
+      E7      77.200      56.700       1.000    0.1216    0.5354    0.8358   0.00000000000000000
+      E8      90.000      67.700       1.000    0.0000    0.3795    0.9252   0.00000000000000000
+      E9     127.300      78.300       1.000   -0.1229    0.1613    0.9792   0.00000000000000000
+     E10      51.200      -9.080       1.000    0.6188    0.7696   -0.1578   0.00000000000000000
+     E11      58.800       2.370       1.000    0.5176    0.8546    0.0414   0.00000000000000000
+     E12      66.800      14.300       1.000    0.3817    0.8907    0.2470  -0.00000000000000011
+     E13      73.800      25.200       1.000    0.2524    0.8689    0.4258   0.00000000000000022
+     E14      81.360      36.400       1.000    0.1209    0.7958    0.5934   0.00000000000000022
+     E15      90.000      46.900       1.000    0.0000    0.6833    0.7302   0.00000000000000000
+     E16     102.800      56.700       1.000   -0.1216    0.5354    0.8358   0.00000000000000000
+     E17     128.200      66.600       1.000   -0.2456    0.3121    0.9178  -0.00000000000000011
+     E18      66.600      -4.970       1.000    0.3957    0.9143   -0.0866  -0.00000000000000011
+     E19      74.000       4.680       1.000    0.2747    0.9581    0.0816  -0.00000000000000011
+     E20      81.960      15.700       1.000    0.1346    0.9532    0.2706   0.00000000000000022
+     E21      90.000      26.400       1.000    0.0000    0.8957    0.4446   0.00000000000000000
+     E22      98.640      36.400       1.000   -0.1209    0.7958    0.5934   0.00000000000000022
+     E23     109.200      45.500       1.000   -0.2305    0.6619    0.7133   0.00000000000000000
+     E24     127.200      54.200       1.000   -0.3537    0.4659    0.8111   0.00000000000000000
+     E25      82.540      -3.260       1.000    0.1296    0.9899   -0.0569   0.00000000000000000
+     E26      90.000       5.370       1.000    0.0000    0.9956    0.0936   0.00000000000000000
+     E27      98.040      15.700       1.000   -0.1346    0.9532    0.2706   0.00000000000000022
+     E28     106.200      25.200       1.000   -0.2524    0.8689    0.4258   0.00000000000000022
+     E29     115.800      33.600       1.000   -0.3625    0.7499    0.5534   0.00000000000000000
+     E30     128.800      41.200       1.000   -0.4715    0.5864    0.6587   0.00000000000000000
+     E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000
+     E32      97.460      -3.260       1.000   -0.1296    0.9899   -0.0569   0.00000000000000000
+     E33     106.000       4.680       1.000   -0.2747    0.9581    0.0816  -0.00000000000000011
+     E34     113.200      14.300       1.000   -0.3817    0.8907    0.2470  -0.00000000000000022
+     E35     121.800      21.800       1.000   -0.4893    0.7891    0.3714  -0.00000000000000011
+     E36     128.500      30.200       1.000   -0.5380    0.6764    0.5030   0.00000000000000022
+     E37     113.400      -4.970       1.000   -0.3957    0.9143   -0.0866   0.00000000000000000
+     E38     121.200       2.370       1.000   -0.5176    0.8546    0.0414   0.00000000000000000
+     E39     128.300      11.000       1.000   -0.6084    0.7704    0.1908   0.00000000000000000
+     E40     135.300      20.800       1.000   -0.6645    0.6576    0.3551   0.00000000000000000
+     E41     140.600      32.000       1.000   -0.6553    0.5383    0.5299   0.00000000000000000
+     E42     144.500      44.000       1.000   -0.5856    0.4177    0.6947   0.00000000000000000
+     E43     151.000      54.800       1.000   -0.5042    0.2795    0.8171   0.00000000000000000
+     E44     163.200      66.400       1.000   -0.3833    0.1157    0.9164   0.00000000000000000
+     E45     197.000      77.300       1.000   -0.2102   -0.0643    0.9755   0.00000000000000000
+     E46     128.800      -9.080       1.000   -0.6188    0.7696   -0.1578   0.00000000000000000
+     E47     135.400      -0.880       1.000   -0.7119    0.7021   -0.0154  -0.00000000000000011
+     E48     142.500       8.460       1.000   -0.7847    0.6021    0.1471   0.00000000000000000
+     E49     149.200      19.400       1.000   -0.8102    0.4830    0.3322   0.00000000000000000
+     E50     155.300      32.200       1.000   -0.7688    0.3536    0.5329   0.00000000000000000
+     E51     162.400      44.200       1.000   -0.6834    0.2168    0.6972   0.00000000000000000
+     E52     173.500      54.500       1.000   -0.5770    0.0657    0.8141   0.00000000000000000
+     E53     197.000      65.600       1.000   -0.3951   -0.1208    0.9107   0.00000000000000000
+     E54     142.300     -14.000       1.000   -0.7677    0.5934   -0.2419   0.00000000000000000
+     E55     149.100      -4.100       1.000   -0.8559    0.5122   -0.0715   0.00000000000000000
+     E56     156.700       7.130       1.000   -0.9113    0.3925    0.1241   0.00000000000000022
+     E57     163.200      19.500       1.000   -0.9024    0.2725    0.3338   0.00000000000000000
+     E58     169.700      31.600       1.000   -0.8380    0.1523    0.5240   0.00000000000000000
+     E59     179.500      43.000       1.000   -0.7313    0.0064    0.6820   0.00000000000000000
+     E60     197.000      53.000       1.000   -0.5755   -0.1760    0.7986   0.00000000000000000
+     E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022
+     E62     165.100      -5.730       1.000   -0.9615    0.2558   -0.0998   0.00000000000000022
+     E63     171.400       6.890       1.000   -0.9816    0.1485    0.1200   0.00000000000000022
+     E64     177.200      19.000       1.000   -0.9444    0.0462    0.3256   0.00000000000000000
+     E65     184.300      31.100       1.000   -0.8539   -0.0642    0.5165   0.00000000000000000
+     E66     196.000      39.900       1.000   -0.7374   -0.2115    0.6414   0.00000000000000000
+     E67     167.300     -27.900       1.000   -0.8621    0.1943   -0.4679   0.00000000000000000
+     E68     172.300     -17.500       1.000   -0.9451    0.1278   -0.3007   0.00000000000000000
+     E69     179.500      -6.970       1.000   -0.9926    0.0087   -0.1213   0.00000000000000000
+     E70     185.400       5.990       1.000   -0.9901   -0.0936    0.1044   0.00000000000000022
+     E71     191.000      18.700       1.000   -0.9298   -0.1807    0.3206   0.00000000000000000
+     E72     197.000      28.500       1.000   -0.8404   -0.2569    0.4772   0.00000000000000000
+     E73     174.500     -38.200       1.000   -0.7822    0.0753   -0.6184   0.00000000000000022
+     E74     193.000      -6.630       1.000   -0.9679   -0.2234   -0.1155   0.00000000000000000
+     E75     199.000       7.590       1.000   -0.9372   -0.3227    0.1321   0.00000000000000000
+     E76     205.000      19.800       1.000   -0.8527   -0.3976    0.3387  -0.00000000000000011
+     E77     209.000      31.900       1.000   -0.7425   -0.4116    0.5284   0.00000000000000000
+     E78     214.000      43.600       1.000   -0.6004   -0.4050    0.6896   0.00000000000000000
+     E79     221.000      55.600       1.000   -0.4264   -0.3707    0.8251  -0.00000000000000011
+     E80     233.000      67.400       1.000   -0.2313   -0.3069    0.9232   0.00000000000000000
+     E81     -90.000      78.400       1.000    0.0000   -0.2011    0.9796  -0.00000000000000011
+     E82     183.900     -45.800       1.000   -0.6956   -0.0474   -0.7169   0.00000000000000000
+     E83     205.000     -15.000       1.000   -0.8754   -0.4082   -0.2588   0.00000000000000000
+     E84     206.000      -3.510       1.000   -0.8971   -0.4375   -0.0612  -0.00000000000000022
+     E85     213.000      10.000       1.000   -0.8259   -0.5364    0.1736  -0.00000000000000011
+     E86     218.000      22.700       1.000   -0.7270   -0.5680    0.3859   0.00000000000000000
+     E87     225.000      35.300       1.000   -0.5771   -0.5771    0.5779   0.00000000000000000
+     E88     232.000      46.800       1.000   -0.4214   -0.5394    0.7290  -0.00000000000000011
+     E89     245.000      56.900       1.000   -0.2308   -0.4949    0.8377  -0.00000000000000011
+     E90     -90.000      67.500       1.000    0.0000   -0.3827    0.9239   0.00000000000000000
+     E91     195.000     -50.900       1.000   -0.6092   -0.1632   -0.7760   0.00000000000000000
+     E92     203.000     -42.700       1.000   -0.6765   -0.2872   -0.6782  -0.00000000000000022
+     E93     211.000     -32.500       1.000   -0.7229   -0.4344   -0.5373   0.00000000000000000
+     E94     212.000     -23.100       1.000   -0.7801   -0.4874   -0.3923   0.00000000000000000
+     E95     216.000     -12.400       1.000   -0.7901   -0.5741   -0.2147   0.00000000000000000
+     E96     221.000       0.666       1.000   -0.7547   -0.6560    0.0116   0.00000000000000000
+     E97     228.000      12.900       1.000   -0.6522   -0.7244    0.2233   0.00000000000000022
+     E98     233.000      24.900       1.000   -0.5459   -0.7244    0.4210   0.00000000000000000
+     E99     241.000      36.400       1.000   -0.3902   -0.7040    0.5934   0.00000000000000000
+    E100     251.000      46.900       1.000   -0.2225   -0.6460    0.7302   0.00000000000000000
+    E101     -90.000      44.200       1.000    0.0000   -0.7169    0.6972   0.00000000000000000
+    E102     211.000     -47.800       1.000   -0.5758   -0.3460   -0.7408   0.00000000000000000
+    E103     217.000     -39.900       1.000   -0.6127   -0.4617   -0.6414   0.00000000000000000
+    E104     223.000     -29.500       1.000   -0.6365   -0.5936   -0.4924   0.00000000000000000
+    E105     224.000     -20.500       1.000   -0.6738   -0.6507   -0.3502   0.00000000000000000
+    E106     228.000      -8.840       1.000   -0.6612   -0.7343   -0.1537   0.00000000000000022
+    E107     235.000       2.900       1.000   -0.5728   -0.8181    0.0506   0.00000000000000000
+    E108     242.000      14.600       1.000   -0.4543   -0.8544    0.2521   0.00000000000000000
+    E109     248.000      25.700       1.000   -0.3375   -0.8355    0.4337  -0.00000000000000011
+    E110     257.000      36.000       1.000   -0.1820   -0.7883    0.5878   0.00000000000000000
+    E111     226.000     -43.800       1.000   -0.5014   -0.5192   -0.6921   0.00000000000000000
+    E112     230.000     -36.300       1.000   -0.5180   -0.6174   -0.5920   0.00000000000000000
+    E113     235.000     -25.900       1.000   -0.5160   -0.7369   -0.4368  -0.00000000000000022
+    E114     235.000     -17.500       1.000   -0.5470   -0.7812   -0.3007   0.00000000000000000
+    E115     244.000      -6.240       1.000   -0.4358   -0.8935   -0.1087   0.00000000000000000
+    E116     251.000       4.850       1.000   -0.3244   -0.9421    0.0845   0.00000000000000000
+    E117     258.000      15.500       1.000   -0.2004   -0.9426    0.2672   0.00000000000000000
+    E118     263.000      25.200       1.000   -0.1103   -0.8981    0.4258   0.00000000000000000
+    E119     -90.000      33.400       1.000    0.0000   -0.8348    0.5505  -0.00000000000000011
+    E120     237.000     -41.100       1.000   -0.4104   -0.6320   -0.6574   0.00000000000000022
+    E121     242.000     -33.400       1.000   -0.3919   -0.7371   -0.5505  -0.00000000000000022
+    E122     247.000     -23.400       1.000   -0.3586   -0.8448   -0.3971   0.00000000000000000
+    E123     252.000     -11.200       1.000   -0.3031   -0.9329   -0.1942   0.00000000000000000
+    E124     257.000      -3.660       1.000   -0.2245   -0.9724   -0.0638   0.00000000000000022
+    E125     264.000       5.580       1.000   -0.1040   -0.9898    0.0972   0.00000000000000000
+    E126     -90.000      15.400       1.000    0.0000   -0.9641    0.2656   0.00000000000000000
+    E127     -83.000      25.200       1.000    0.1103   -0.8981    0.4258   0.00000000000000000
+    E128     -77.000      36.000       1.000    0.1820   -0.7883    0.5878   0.00000000000000000
+    E129     -71.000      46.900       1.000    0.2225   -0.6460    0.7302   0.00000000000000000
+    E130     -65.000      56.900       1.000    0.2308   -0.4949    0.8377  -0.00000000000000011
+    E131     -53.000      67.400       1.000    0.2313   -0.3069    0.9232   0.00000000000000000
+    E132     -17.000      77.300       1.000    0.2102   -0.0643    0.9755   0.00000000000000000
+    E133     248.000     -36.400       1.000   -0.3015   -0.7463   -0.5934   0.00000000000000022
+    E134     253.000     -30.700       1.000   -0.2514   -0.8223   -0.5105  -0.00000000000000011
+    E135     258.000     -19.400       1.000   -0.1961   -0.9226   -0.3322  -0.00000000000000011
+    E136     265.000     -12.900       1.000   -0.0850   -0.9711   -0.2233   0.00000000000000000
+    E137     -90.000      -5.280       1.000    0.0000   -0.9958   -0.0920   0.00000000000000000
+    E138     -84.000       5.580       1.000    0.1040   -0.9898    0.0972  -0.00000000000000022
+    E139     -78.000      15.500       1.000    0.2004   -0.9426    0.2672  -0.00000000000000011
+    E140     -68.000      25.700       1.000    0.3375   -0.8355    0.4337  -0.00000000000000011
+    E141     -61.000      36.400       1.000    0.3902   -0.7040    0.5934   0.00000000000000000
+    E142     -52.000      46.800       1.000    0.4214   -0.5394    0.7290   0.00000000000000000
+    E143     -41.000      55.600       1.000    0.4264   -0.3707    0.8251   0.00000000000000000
+    E144     -17.000      65.600       1.000    0.3951   -0.1208    0.9107   0.00000000000000000
+    E145     258.000     -35.800       1.000   -0.1686   -0.7933   -0.5850   0.00000000000000000
+    E146     264.000     -29.600       1.000   -0.0909   -0.8647   -0.4939   0.00000000000000000
+    E147     -90.000     -22.100       1.000    0.0000   -0.9265   -0.3762   0.00000000000000000
+    E148     -85.000     -12.900       1.000    0.0850   -0.9711   -0.2233   0.00000000000000000
+    E149     -77.000      -3.660       1.000    0.2245   -0.9724   -0.0638   0.00000000000000022
+    E150     -71.000       4.850       1.000    0.3244   -0.9421    0.0845  -0.00000000000000022
+    E151     -62.000      14.600       1.000    0.4543   -0.8544    0.2521   0.00000000000000000
+    E152     -53.000      24.900       1.000    0.5459   -0.7244    0.4210   0.00000000000000000
+    E153     -45.000      35.300       1.000    0.5771   -0.5771    0.5779   0.00000000000000000
+    E154     -34.000      43.600       1.000    0.6004   -0.4050    0.6896   0.00000000000000000
+    E155     -17.000      53.000       1.000    0.5755   -0.1760    0.7986   0.00000000000000000
+    E156     -84.000     -29.600       1.000    0.0909   -0.8647   -0.4939  -0.00000000000000011
+    E157     -78.000     -19.400       1.000    0.1961   -0.9226   -0.3322  -0.00000000000000022
+    E158     -72.000     -11.200       1.000    0.3031   -0.9329   -0.1942   0.00000000000000000
+    E159     -64.000      -6.240       1.000    0.4358   -0.8935   -0.1087   0.00000000000000022
+    E160     -55.000       2.900       1.000    0.5728   -0.8181    0.0506   0.00000000000000022
+    E161     -48.000      12.900       1.000    0.6522   -0.7244    0.2233   0.00000000000000000
+    E162     -38.000      22.700       1.000    0.7270   -0.5680    0.3859   0.00000000000000000
+    E163     -29.000      31.900       1.000    0.7425   -0.4116    0.5284   0.00000000000000000
+    E164     -16.000      39.900       1.000    0.7374   -0.2115    0.6414   0.00000000000000000
+    E165     -78.000     -35.800       1.000    0.1686   -0.7933   -0.5850   0.00000000000000000
+    E166     -73.000     -30.700       1.000    0.2514   -0.8223   -0.5105   0.00000000000000000
+    E167     -67.000     -23.400       1.000    0.3586   -0.8448   -0.3971   0.00000000000000000
+    E168     -55.000     -17.500       1.000    0.5470   -0.7812   -0.3007   0.00000000000000000
+    E169     -48.000      -8.840       1.000    0.6612   -0.7343   -0.1537   0.00000000000000022
+    E170     -41.000       0.666       1.000    0.7547   -0.6560    0.0116   0.00000000000000000
+    E171     -33.000      10.000       1.000    0.8259   -0.5364    0.1736  -0.00000000000000011
+    E172     -25.000      19.800       1.000    0.8527   -0.3976    0.3387  -0.00000000000000022
+    E173     -17.000      28.500       1.000    0.8404   -0.2569    0.4772   0.00000000000000000
+    E174     -68.000     -36.400       1.000    0.3015   -0.7463   -0.5934   0.00000000000000000
+    E175     -62.000     -33.400       1.000    0.3919   -0.7371   -0.5505  -0.00000000000000011
+    E176     -55.000     -25.900       1.000    0.5160   -0.7369   -0.4368  -0.00000000000000022
+    E177     -44.000     -20.500       1.000    0.6738   -0.6507   -0.3502   0.00000000000000000
+    E178     -36.000     -12.400       1.000    0.7901   -0.5741   -0.2147   0.00000000000000000
+    E179     -26.000      -3.510       1.000    0.8971   -0.4375   -0.0612  -0.00000000000000011
+    E180     -19.000       7.590       1.000    0.9372   -0.3227    0.1321   0.00000000000000022
+    E181     -11.000      18.700       1.000    0.9298   -0.1807    0.3206   0.00000000000000022
+    E182      -4.300      31.100       1.000    0.8539   -0.0642    0.5165   0.00000000000000000
+    E183       0.500      43.000       1.000    0.7313    0.0064    0.6820   0.00000000000000000
+    E184       6.500      54.500       1.000    0.5770    0.0657    0.8141   0.00000000000000000
+    E185      16.800      66.400       1.000    0.3833    0.1157    0.9164   0.00000000000000000
+    E186      52.700      78.300       1.000    0.1229    0.1613    0.9792   0.00000000000000000
+    E187     -57.000     -41.100       1.000    0.4104   -0.6320   -0.6574   0.00000000000000022
+    E188     -50.000     -36.300       1.000    0.5180   -0.6174   -0.5920  -0.00000000000000022
+    E189     -43.000     -29.500       1.000    0.6365   -0.5936   -0.4924   0.00000000000000000
+    E190     -32.000     -23.100       1.000    0.7801   -0.4874   -0.3923   0.00000000000000000
+    E191     -25.000     -15.000       1.000    0.8754   -0.4082   -0.2588   0.00000000000000000
+    E192     -13.000      -6.630       1.000    0.9679   -0.2234   -0.1155   0.00000000000000000
+    E193      -5.400       5.990       1.000    0.9901   -0.0936    0.1044   0.00000000000000022
+    E194       2.800      19.000       1.000    0.9444    0.0462    0.3256   0.00000000000000022
+    E195      10.300      31.600       1.000    0.8380    0.1523    0.5240   0.00000000000000000
+    E196      17.600      44.200       1.000    0.6834    0.2168    0.6972   0.00000000000000000
+    E197      29.000      54.800       1.000    0.5042    0.2795    0.8171   0.00000000000000000
+    E198      51.800      66.600       1.000    0.2456    0.3121    0.9178   0.00000000000000000
+    E199     -46.000     -43.800       1.000    0.5014   -0.5192   -0.6921   0.00000000000000000
+    E200     -37.000     -39.900       1.000    0.6127   -0.4617   -0.6414   0.00000000000000000
+    E201     -31.000     -32.500       1.000    0.7229   -0.4344   -0.5373   0.00000000000000000
+    E202       0.500      -6.970       1.000    0.9926    0.0087   -0.1213   0.00000000000000000
+    E203       8.600       6.890       1.000    0.9816    0.1485    0.1200   0.00000000000000044
+    E204      16.800      19.500       1.000    0.9024    0.2725    0.3338   0.00000000000000000
+    E205      24.700      32.200       1.000    0.7688    0.3536    0.5329   0.00000000000000000
+    E206      35.500      44.000       1.000    0.5856    0.4177    0.6947   0.00000000000000000
+    E207      52.800      54.200       1.000    0.3537    0.4659    0.8111   0.00000000000000000
+    E208     -31.000     -47.800       1.000    0.5758   -0.3460   -0.7408   0.00000000000000000
+    E209     -23.000     -42.700       1.000    0.6765   -0.2872   -0.6782   0.00000000000000000
+    E210       7.700     -17.500       1.000    0.9451    0.1278   -0.3007   0.00000000000000000
+    E211      14.900      -5.730       1.000    0.9615    0.2558   -0.0998  -0.00000000000000011
+    E212      23.300       7.130       1.000    0.9113    0.3925    0.1241   0.00000000000000022
+    E213      30.800      19.400       1.000    0.8102    0.4830    0.3322   0.00000000000000000
+    E214      39.400      32.000       1.000    0.6553    0.5383    0.5299   0.00000000000000000
+    E215      51.200      41.200       1.000    0.4715    0.5864    0.6587   0.00000000000000000
+    E216     -15.000     -50.900       1.000    0.6092   -0.1632   -0.7760   0.00000000000000000
+    E217      -3.900     -45.800       1.000    0.6956   -0.0474   -0.7169   0.00000000000000000
+    E218       5.500     -38.200       1.000    0.7822    0.0753   -0.6184   0.00000000000000022
+    E219      12.700     -27.900       1.000    0.8621    0.1943   -0.4679   0.00000000000000000
+    E220      22.000     -17.200       1.000    0.8857    0.3579   -0.2957  -0.00000000000000022
+    E221      30.900      -4.100       1.000    0.8559    0.5122   -0.0715   0.00000000000000000
+    E222      37.500       8.460       1.000    0.7847    0.6021    0.1471   0.00000000000000000
+    E223      44.700      20.800       1.000    0.6645    0.6576    0.3551   0.00000000000000000
+    E224      51.500      30.200       1.000    0.5380    0.6764    0.5030   0.00000000000000000
+    E225      23.100     -28.000       1.000    0.8122    0.3464   -0.4695   0.00000000000000000
+    E226      33.500     -28.800       1.000    0.7307    0.4837   -0.4818   0.00000000000000000
+    E227      18.500     -38.200       1.000    0.7452    0.2494   -0.6184   0.00000000000000000
+    E228      10.400     -46.300       1.000    0.6795    0.1247   -0.7230   0.00000000000000000
+    E229      -1.200     -53.100       1.000    0.6003   -0.0126   -0.7997   0.00000000000000000
+    E230      41.600     -32.900       1.000    0.6279    0.5574   -0.5432   0.00000000000000000
+    E231      29.900     -39.600       1.000    0.6680    0.3841   -0.6374   0.00000000000000000
+    E232      23.600     -46.600       1.000    0.6296    0.2751   -0.7266   0.00000000000000000
+    E233      13.200     -53.300       1.000    0.5818    0.1365   -0.8018   0.00000000000000022
+    E234      50.800     -35.100       1.000    0.5171    0.6340   -0.5750  -0.00000000000000011
+    E235      40.300     -41.300       1.000    0.5730    0.4859   -0.6600   0.00000000000000022
+    E236      34.400     -47.800       1.000    0.5542    0.3795   -0.7408   0.00000000000000000
+    E237      26.900     -54.600       1.000    0.5166    0.2621   -0.8151   0.00000000000000000
+    E238      60.300     -35.600       1.000    0.4029    0.7063   -0.5821   0.00000000000000022
+    E239      47.800     -45.000       1.000    0.4750    0.5238   -0.7071  -0.00000000000000011
+    E240      41.600     -50.500       1.000    0.4757    0.4223   -0.7716   0.00000000000000000
+    E241     119.700     -35.600       1.000   -0.4029    0.7063   -0.5821   0.00000000000000000
+    E242     132.200     -45.000       1.000   -0.4750    0.5238   -0.7071  -0.00000000000000011
+    E243     138.400     -50.500       1.000   -0.4757    0.4223   -0.7716   0.00000000000000000
+    E244     129.200     -35.100       1.000   -0.5171    0.6340   -0.5750  -0.00000000000000011
+    E245     139.700     -41.300       1.000   -0.5730    0.4859   -0.6600   0.00000000000000000
+    E246     145.600     -47.800       1.000   -0.5542    0.3795   -0.7408  -0.00000000000000011
+    E247     153.100     -54.600       1.000   -0.5166    0.2621   -0.8151   0.00000000000000000
+    E248     138.400     -32.900       1.000   -0.6279    0.5574   -0.5432  -0.00000000000000022
+    E249     150.100     -39.600       1.000   -0.6680    0.3841   -0.6374   0.00000000000000000
+    E250     156.400     -46.600       1.000   -0.6296    0.2751   -0.7266  -0.00000000000000011
+    E251     166.800     -53.300       1.000   -0.5818    0.1365   -0.8018   0.00000000000000022
+    E252     146.500     -28.800       1.000   -0.7307    0.4837   -0.4818   0.00000000000000000
+    E253     156.900     -28.000       1.000   -0.8122    0.3464   -0.4695   0.00000000000000000
+    E254     161.500     -38.200       1.000   -0.7452    0.2494   -0.6184   0.00000000000000000
+    E255     169.600     -46.300       1.000   -0.6795    0.1247   -0.7230   0.00000000000000000
+    E256     181.200     -53.100       1.000   -0.6003   -0.0126   -0.7997   0.00000000000000000
diff --git a/mne/channels/data/montages/GSN-HydroCel-128.sfp b/mne/channels/data/montages/GSN-HydroCel-128.sfp
new file mode 100644
index 0000000..56c94f8
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-128.sfp
@@ -0,0 +1,131 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	5.787677636	5.520863216	-2.577468644
+E2	5.291804727	6.709097557	0.307434896
+E3	3.864122447	7.63424051	3.067770143
+E4	2.868837559	7.145708546	4.989564557
+E5	1.479340453	5.68662139	6.812878187
+E6	0	3.806770224	7.891304964
+E7	-1.223800252	1.558864431	8.44043914
+E8	4.221901505	7.998817387	-1.354789681
+E9	2.695405558	8.884820317	1.088308144
+E10	1.830882336	8.708839134	3.18709115
+E11	0	7.96264703	5.044718001
+E12	-1.479340453	5.68662139	6.812878187
+E13	-2.435870762	3.254307219	7.608766206
+E14	1.270447661	9.479016328	-0.947183306
+E15	0	9.087440894	1.333345013
+E16	0	9.076490798	3.105438474
+E17	0	9.271139705	-2.211516434
+E18	-1.830882336	8.708839134	3.18709115
+E19	-2.868837559	7.145708546	4.989564557
+E20	-3.825797111	5.121648995	5.942844877
+E21	-1.270447661	9.479016328	-0.947183306
+E22	-2.695405558	8.884820317	1.088308144
+E23	-3.864122447	7.63424051	3.067770143
+E24	-4.459387187	6.021159964	4.365321482
+E25	-4.221901505	7.998817387	-1.354789681
+E26	-5.291804727	6.709097557	0.307434896
+E27	-5.682547954	5.453384344	2.836565436
+E28	-5.546670402	4.157847823	4.627615703
+E29	-4.762196763	2.697832099	6.297663028
+E30	-3.695490968	0.960411022	7.627828134
+E31	-1.955187826	-0.684381878	8.564858511
+E32	-5.787677636	5.520863216	-2.577468644
+E33	-6.399087198	4.127248875	-0.356852241
+E34	-6.823959684	2.968422112	2.430080351
+E35	-6.414469893	1.490027747	4.741794544
+E36	-5.47913021	0.284948655	6.38332782
+E37	-3.909902609	-1.519049882	7.764134929
+E38	-6.550732888	3.611543152	-3.353155926
+E39	-7.191620108	0.850096251	-0.882936903
+E40	-7.391919265	0.032151584	2.143634599
+E41	-6.905051715	-0.800953972	4.600056501
+E42	-5.956055073	-2.338984312	6.00361353
+E43	-6.518995129	2.417299399	-5.253637073
+E44	-6.840717711	1.278489412	-3.5553823
+E45	-7.304625099	-1.866238006	-0.629182006
+E46	-7.312517928	-2.298574078	2.385298838
+E47	-6.737313764	-3.011819533	4.178390203
+E48	-5.934584124	2.22697797	-7.934360742
+E49	-6.298127313	0.41663451	-6.069156425
+E50	-6.78248072	-4.023512045	-0.232191092
+E51	-6.558030032	-4.667036048	2.749989597
+E52	-5.831241498	-4.494821698	4.955347697
+E53	-4.193518856	-4.037020083	6.982920038
+E54	-2.270752074	-3.414835627	8.204556551
+E55	0	-2.138343513	8.791875902
+E56	-6.174969392	-2.458138877	-5.637380998
+E57	-6.580438308	-3.739554155	-2.991084431
+E58	-6.034746843	-5.755782196	0.051843011
+E59	-5.204501802	-6.437833018	2.984444293
+E60	-4.116929504	-6.061561438	5.365757296
+E61	-2.344914884	-5.481057427	7.057748614
+E62	0	-6.676694032	6.465208258
+E63	-5.333266171	-4.302240169	-5.613509789
+E64	-5.404091392	-5.870302681	-2.891640039
+E65	-4.645302298	-7.280552408	0.130139701
+E66	-3.608293164	-7.665487704	3.129931648
+E67	-1.844644417	-7.354417376	5.224001733
+E68	-3.784983913	-6.401014415	-5.260040689
+E69	-3.528848027	-7.603010836	-2.818037873
+E70	-2.738838019	-8.607966849	0.239368223
+E71	-1.404967401	-8.437486994	3.277284901
+E72	0	-7.829896826	4.687622229
+E73	-1.929652202	-7.497197868	-5.136777648
+E74	-1.125731192	-8.455208629	-2.632832329
+E75	0	-8.996686498	0.487952047
+E76	1.404967401	-8.437486994	3.277284901
+E77	1.844644417	-7.354417376	5.224001733
+E78	2.344914884	-5.481057427	7.057748614
+E79	2.270752074	-3.414835627	8.204556551
+E80	1.955187826	-0.684381878	8.564858511
+E81	0	-7.85891896	-4.945387489
+E82	1.125731192	-8.455208629	-2.632832329
+E83	2.738838019	-8.607966849	0.239368223
+E84	3.608293164	-7.665487704	3.129931648
+E85	4.116929504	-6.061561438	5.365757296
+E86	4.193518856	-4.037020083	6.982920038
+E87	3.909902609	-1.519049882	7.764134929
+E88	1.929652202	-7.497197868	-5.136777648
+E89	3.528848027	-7.603010836	-2.818037873
+E90	4.645302298	-7.280552408	0.130139701
+E91	5.204501802	-6.437833018	2.984444293
+E92	5.831241498	-4.494821698	4.955347697
+E93	5.956055073	-2.338984312	6.00361353
+E94	3.784983913	-6.401014415	-5.260040689
+E95	5.404091392	-5.870302681	-2.891640039
+E96	6.034746843	-5.755782196	0.051843011
+E97	6.558030032	-4.667036048	2.749989597
+E98	6.737313764	-3.011819533	4.178390203
+E99	5.333266171	-4.302240169	-5.613509789
+E100	6.580438308	-3.739554155	-2.991084431
+E101	6.78248072	-4.023512045	-0.232191092
+E102	7.312517928	-2.298574078	2.385298838
+E103	6.905051715	-0.800953972	4.600056501
+E104	5.47913021	0.284948655	6.38332782
+E105	3.695490968	0.960411022	7.627828134
+E106	1.223800252	1.558864431	8.44043914
+E107	6.174969392	-2.458138877	-5.637380998
+E108	7.304625099	-1.866238006	-0.629182006
+E109	7.391919265	0.032151584	2.143634599
+E110	6.414469893	1.490027747	4.741794544
+E111	4.762196763	2.697832099	6.297663028
+E112	2.435870762	3.254307219	7.608766206
+E113	6.298127313	0.41663451	-6.069156425
+E114	6.840717711	1.278489412	-3.5553823
+E115	7.191620108	0.850096251	-0.882936903
+E116	6.823959684	2.968422112	2.430080351
+E117	5.546670402	4.157847823	4.627615703
+E118	3.825797111	5.121648995	5.942844877
+E119	5.934584124	2.22697797	-7.934360742
+E120	6.518995129	2.417299399	-5.253637073
+E121	6.550732888	3.611543152	-3.353155926
+E122	6.399087198	4.127248875	-0.356852241
+E123	5.682547954	5.453384344	2.836565436
+E124	4.459387187	6.021159964	4.365321482
+E125	6.118458137	4.523870113	-4.409174427
+E126	3.743504949	6.649204911	-6.530243068
+E127	-3.743504949	6.649204911	-6.530243068
+E128	-6.118458137	4.523870113	-4.409174427
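
[Editorial note] The .sfp montages added in this commit share a simple plain-text layout: three fiducial rows (FidNz, FidT9, FidT10) followed by one "label x y z" row per electrode. The coordinates appear to be in centimeters, judging by the roughly 9 cm head radius of the values; that unit is an inference, not stated in the files. A minimal reader, under those assumptions (read_sfp is a hypothetical helper, not part of the package):

    import numpy as np

    def read_sfp(fname):
        # Parse 'label x y z' rows; tabs and runs of spaces both occur
        # in these files, so a bare split() handles either.
        labels, pos = [], []
        with open(fname) as fid:
            for line in fid:
                parts = line.split()
                if len(parts) != 4:
                    continue  # skip blank or malformed lines
                labels.append(parts[0])
                pos.append([float(p) for p in parts[1:]])
        return labels, np.array(pos)

    # e.g. labels, pos = read_sfp('GSN-HydroCel-128.sfp')
    # pos.shape == (131, 3): 3 fiducials + 128 electrodes
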
diff --git a/mne/channels/data/montages/GSN-HydroCel-129.sfp b/mne/channels/data/montages/GSN-HydroCel-129.sfp
new file mode 100644
index 0000000..fb222db
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-129.sfp
@@ -0,0 +1,132 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	5.787677636	5.520863216	-2.577468644
+E2	5.291804727	6.709097557	0.307434896
+E3	3.864122447	7.63424051	3.067770143
+E4	2.868837559	7.145708546	4.989564557
+E5	1.479340453	5.68662139	6.812878187
+E6	0	3.806770224	7.891304964
+E7	-1.223800252	1.558864431	8.44043914
+E8	4.221901505	7.998817387	-1.354789681
+E9	2.695405558	8.884820317	1.088308144
+E10	1.830882336	8.708839134	3.18709115
+E11	0	7.96264703	5.044718001
+E12	-1.479340453	5.68662139	6.812878187
+E13	-2.435870762	3.254307219	7.608766206
+E14	1.270447661	9.479016328	-0.947183306
+E15	0	9.087440894	1.333345013
+E16	0	9.076490798	3.105438474
+E17	0	9.271139705	-2.211516434
+E18	-1.830882336	8.708839134	3.18709115
+E19	-2.868837559	7.145708546	4.989564557
+E20	-3.825797111	5.121648995	5.942844877
+E21	-1.270447661	9.479016328	-0.947183306
+E22	-2.695405558	8.884820317	1.088308144
+E23	-3.864122447	7.63424051	3.067770143
+E24	-4.459387187	6.021159964	4.365321482
+E25	-4.221901505	7.998817387	-1.354789681
+E26	-5.291804727	6.709097557	0.307434896
+E27	-5.682547954	5.453384344	2.836565436
+E28	-5.546670402	4.157847823	4.627615703
+E29	-4.762196763	2.697832099	6.297663028
+E30	-3.695490968	0.960411022	7.627828134
+E31	-1.955187826	-0.684381878	8.564858511
+E32	-5.787677636	5.520863216	-2.577468644
+E33	-6.399087198	4.127248875	-0.356852241
+E34	-6.823959684	2.968422112	2.430080351
+E35	-6.414469893	1.490027747	4.741794544
+E36	-5.47913021	0.284948655	6.38332782
+E37	-3.909902609	-1.519049882	7.764134929
+E38	-6.550732888	3.611543152	-3.353155926
+E39	-7.191620108	0.850096251	-0.882936903
+E40	-7.391919265	0.032151584	2.143634599
+E41	-6.905051715	-0.800953972	4.600056501
+E42	-5.956055073	-2.338984312	6.00361353
+E43	-6.518995129	2.417299399	-5.253637073
+E44	-6.840717711	1.278489412	-3.5553823
+E45	-7.304625099	-1.866238006	-0.629182006
+E46	-7.312517928	-2.298574078	2.385298838
+E47	-6.737313764	-3.011819533	4.178390203
+E48	-5.934584124	2.22697797	-7.934360742
+E49	-6.298127313	0.41663451	-6.069156425
+E50	-6.78248072	-4.023512045	-0.232191092
+E51	-6.558030032	-4.667036048	2.749989597
+E52	-5.831241498	-4.494821698	4.955347697
+E53	-4.193518856	-4.037020083	6.982920038
+E54	-2.270752074	-3.414835627	8.204556551
+E55	0	-2.138343513	8.791875902
+E56	-6.174969392	-2.458138877	-5.637380998
+E57	-6.580438308	-3.739554155	-2.991084431
+E58	-6.034746843	-5.755782196	0.051843011
+E59	-5.204501802	-6.437833018	2.984444293
+E60	-4.116929504	-6.061561438	5.365757296
+E61	-2.344914884	-5.481057427	7.057748614
+E62	0	-6.676694032	6.465208258
+E63	-5.333266171	-4.302240169	-5.613509789
+E64	-5.404091392	-5.870302681	-2.891640039
+E65	-4.645302298	-7.280552408	0.130139701
+E66	-3.608293164	-7.665487704	3.129931648
+E67	-1.844644417	-7.354417376	5.224001733
+E68	-3.784983913	-6.401014415	-5.260040689
+E69	-3.528848027	-7.603010836	-2.818037873
+E70	-2.738838019	-8.607966849	0.239368223
+E71	-1.404967401	-8.437486994	3.277284901
+E72	0	-7.829896826	4.687622229
+E73	-1.929652202	-7.497197868	-5.136777648
+E74	-1.125731192	-8.455208629	-2.632832329
+E75	0	-8.996686498	0.487952047
+E76	1.404967401	-8.437486994	3.277284901
+E77	1.844644417	-7.354417376	5.224001733
+E78	2.344914884	-5.481057427	7.057748614
+E79	2.270752074	-3.414835627	8.204556551
+E80	1.955187826	-0.684381878	8.564858511
+E81	0	-7.85891896	-4.945387489
+E82	1.125731192	-8.455208629	-2.632832329
+E83	2.738838019	-8.607966849	0.239368223
+E84	3.608293164	-7.665487704	3.129931648
+E85	4.116929504	-6.061561438	5.365757296
+E86	4.193518856	-4.037020083	6.982920038
+E87	3.909902609	-1.519049882	7.764134929
+E88	1.929652202	-7.497197868	-5.136777648
+E89	3.528848027	-7.603010836	-2.818037873
+E90	4.645302298	-7.280552408	0.130139701
+E91	5.204501802	-6.437833018	2.984444293
+E92	5.831241498	-4.494821698	4.955347697
+E93	5.956055073	-2.338984312	6.00361353
+E94	3.784983913	-6.401014415	-5.260040689
+E95	5.404091392	-5.870302681	-2.891640039
+E96	6.034746843	-5.755782196	0.051843011
+E97	6.558030032	-4.667036048	2.749989597
+E98	6.737313764	-3.011819533	4.178390203
+E99	5.333266171	-4.302240169	-5.613509789
+E100	6.580438308	-3.739554155	-2.991084431
+E101	6.78248072	-4.023512045	-0.232191092
+E102	7.312517928	-2.298574078	2.385298838
+E103	6.905051715	-0.800953972	4.600056501
+E104	5.47913021	0.284948655	6.38332782
+E105	3.695490968	0.960411022	7.627828134
+E106	1.223800252	1.558864431	8.44043914
+E107	6.174969392	-2.458138877	-5.637380998
+E108	7.304625099	-1.866238006	-0.629182006
+E109	7.391919265	0.032151584	2.143634599
+E110	6.414469893	1.490027747	4.741794544
+E111	4.762196763	2.697832099	6.297663028
+E112	2.435870762	3.254307219	7.608766206
+E113	6.298127313	0.41663451	-6.069156425
+E114	6.840717711	1.278489412	-3.5553823
+E115	7.191620108	0.850096251	-0.882936903
+E116	6.823959684	2.968422112	2.430080351
+E117	5.546670402	4.157847823	4.627615703
+E118	3.825797111	5.121648995	5.942844877
+E119	5.934584124	2.22697797	-7.934360742
+E120	6.518995129	2.417299399	-5.253637073
+E121	6.550732888	3.611543152	-3.353155926
+E122	6.399087198	4.127248875	-0.356852241
+E123	5.682547954	5.453384344	2.836565436
+E124	4.459387187	6.021159964	4.365321482
+E125	6.118458137	4.523870113	-4.409174427
+E126	3.743504949	6.649204911	-6.530243068
+E127	-3.743504949	6.649204911	-6.530243068
+E128	-6.118458137	4.523870113	-4.409174427
+Cz	0	0	8.899186843
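
[Editorial note] The 129-channel variant above is identical to the 128-channel file apart from the extra Cz reference row at its end. In the MNE API of this release (0.10), bundled montages like these could be loaded by file stem via read_montage (later superseded); a hedged sketch of that usage:

    import mne

    # Load the bundled montage by name; the returned Montage object
    # carries channel names and positions parsed from the .sfp file.
    montage = mne.channels.read_montage('GSN-HydroCel-129')
    print(montage.ch_names[:4], montage.pos.shape)
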
diff --git a/mne/channels/data/montages/GSN-HydroCel-256.sfp b/mne/channels/data/montages/GSN-HydroCel-256.sfp
new file mode 100644
index 0000000..2464e89
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-256.sfp
@@ -0,0 +1,259 @@
+FidNz 0.00000 10.56381 -2.05108
+FidT9 -7.82694 0.45386 -3.76056
+FidT10 7.82694 0.45386 -3.76056
+E1 6.96223 5.38242 -2.19061
+E2 6.48414 6.40424 -0.14004
+E3 5.69945 7.20796 1.79088
+E4 4.81093 7.77321 3.65006
+E5 3.61962 7.47782 5.50947
+E6 2.25278 6.46157 6.96317
+E7 1.18879 5.21755 8.13378
+E8 0.00000 3.59608 8.75111
+E9 -1.15339 1.51369 9.19904
+E10 5.94022 7.38337 -1.51513
+E11 5.07624 8.37264 0.40595
+E12 3.87946 9.03611 2.51559
+E13 2.60756 8.97868 4.39107
+E14 1.23344 8.11574 6.06161
+E15 0.00000 6.81181 7.28186
+E16 -1.18879 5.21755 8.13378
+E17 -2.29559 2.91372 8.55810
+E18 4.06489 9.40559 -0.89098
+E19 2.86784 10.01456 0.85212
+E20 1.42153 10.06322 2.84803
+E21 0.00000 9.40339 4.65829
+E22 -1.23344 8.11574 6.06161
+E23 -2.25278 6.46157 6.96317
+E24 -3.34467 4.40891 7.67253
+E25 1.39547 10.65281 -0.61138
+E26 0.00000 10.68996 1.00542
+E27 -1.42153 10.06322 2.84803
+E28 -2.60756 8.97868 4.39107
+E29 -3.61962 7.47782 5.50947
+E30 -4.49828 5.59395 6.28801
+E31 0.00000 10.56381 -2.05108
+E32 -1.39547 10.65281 -0.61138
+E33 -2.86784 10.01456 0.85212
+E34 -3.87946 9.03611 2.51559
+E35 -4.81093 7.77321 3.65006
+E36 -5.10466 6.41586 4.77815
+E37 -4.06489 9.40559 -0.89098
+E38 -5.07624 8.37264 0.40595
+E39 -5.69945 7.20796 1.79088
+E40 -6.16984 6.11292 3.29612
+E41 -6.01447 4.93908 4.85771
+E42 -5.33943 3.80220 6.32664
+E43 -4.64127 2.57224 7.50868
+E44 -3.53746 1.07133 8.47419
+E45 -1.99458 -0.60998 9.28870
+E46 -5.94022 7.38337 -1.51513
+E47 -6.48414 6.40424 -0.14004
+E48 -6.97545 5.35131 1.30741
+E49 -7.10064 4.23342 2.91874
+E50 -6.86564 3.16240 4.76800
+E51 -6.11380 1.94213 6.23844
+E52 -5.31389 0.60081 7.48811
+E53 -3.72368 -1.14573 8.58697
+E54 -6.96223 5.38242 -2.19061
+E55 -7.31613 4.37155 -0.61128
+E56 -7.66385 3.29619 1.04415
+E57 -7.62423 2.30205 2.81799
+E58 -7.36570 1.34368 4.60382
+E59 -6.70292 0.06004 6.23992
+E60 -5.40372 -1.61247 7.47343
+E61 -7.54098 3.05323 -2.51935
+E62 -7.77059 2.06323 -0.80729
+E63 -7.96921 1.20744 0.97332
+E64 -8.06621 0.40109 2.78565
+E65 -7.60767 -0.56840 4.59939
+E66 -6.81554 -1.94522 5.93053
+E67 -7.69315 1.74041 -4.18153
+E68 -7.74468 1.05291 -2.47059
+E69 -7.93758 0.07220 -0.96992
+E70 -7.98893 -0.75212 0.84194
+E71 -8.05947 -1.50296 2.76753
+E72 -7.56445 -2.31141 4.30327
+E73 -7.52646 0.73096 -5.96025
+E74 -7.76752 -1.84131 -0.92719
+E75 -7.79279 -2.73175 1.10033
+E76 -7.46191 -3.49308 2.95937
+E77 -6.86934 -3.79448 4.89401
+E78 -5.65276 -3.84604 6.52108
+E79 -4.12465 -3.54800 7.95405
+E80 -2.23647 -2.95809 8.92461
+E81 0.00000 -1.93834 9.45867
+E82 -7.12806 -0.49186 -7.34929
+E83 -7.37920 -3.49709 -2.18347
+E84 -7.52183 -3.70044 -0.51432
+E85 -7.15214 -4.71132 1.51762
+E86 -6.48817 -5.15829 3.47294
+E87 -5.53051 -5.46184 5.50189
+E88 -4.03809 -5.23807 7.04455
+E89 -2.29514 -4.87829 8.27223
+E90 0.00000 -3.74195 9.02791
+E91 -6.82585 -1.86426 -8.69399
+E92 -6.74047 -2.84840 -6.74712
+E93 -6.78379 -4.01784 -5.01755
+E94 -7.03346 -4.45090 -3.54895
+E95 -6.99052 -5.01694 -1.88810
+E96 -6.67571 -5.73608 0.10234
+E97 -5.96851 -6.52864 2.03293
+E98 -5.10822 -6.74936 3.92134
+E99 -3.75216 -6.67734 5.63719
+E100 -2.14874 -6.29190 7.11453
+E101 0.00000 -7.15042 6.95434
+E102 -6.36989 -3.82470 -8.20622
+E103 -6.24349 -4.62250 -6.49623
+E104 -6.09726 -5.61090 -4.67894
+E105 -6.31441 -6.01299 -3.25921
+E106 -5.98418 -6.74733 -1.40314
+E107 -5.23709 -7.57398 0.46627
+E108 -4.29098 -8.11323 2.38442
+E109 -3.24277 -8.15293 4.22025
+E110 -1.73181 -7.63850 5.69360
+E111 -5.63580 -5.80367 -7.74857
+E112 -5.38718 -6.45180 -6.16689
+E113 -5.08285 -7.32643 -4.32109
+E114 -5.27282 -7.46584 -2.87485
+E115 -4.13620 -8.61230 -1.04503
+E116 -3.13323 -9.13629 0.81878
+E117 -1.94503 -9.23415 2.62135
+E118 -1.09312 -8.74110 4.13810
+E119 0.00000 -8.09146 5.34087
+E120 -4.70608 -7.21970 -7.52955
+E121 -4.20415 -7.81153 -5.84368
+E122 -3.62234 -8.59338 -4.04243
+E123 -3.02717 -9.45363 -1.95941
+E124 -2.20152 -9.70916 -0.63755
+E125 -1.01682 -9.71656 0.95467
+E126 0.00000 -9.23206 2.54671
+E127 1.09312 -8.74110 4.13810
+E128 1.73181 -7.63850 5.69360
+E129 2.14874 -6.29190 7.11453
+E130 2.29514 -4.87829 8.27223
+E131 2.23647 -2.95809 8.92461
+E132 1.99458 -0.60998 9.28870
+E133 -3.45625 -8.57317 -6.82654
+E134 -2.71528 -8.94646 -5.55376
+E135 -2.03205 -9.56166 -3.44989
+E136 -0.91885 -9.62744 -2.21054
+E137 0.00000 -9.58535 -0.88629
+E138 1.01682 -9.71656 0.95467
+E139 1.94503 -9.23415 2.62135
+E140 3.24277 -8.15293 4.22025
+E141 3.75216 -6.67734 5.63719
+E142 4.03809 -5.23807 7.04455
+E143 4.12465 -3.54800 7.95405
+E144 3.72368 -1.14573 8.58697
+E145 -1.88533 -9.22031 -6.79889
+E146 -1.06111 -9.53369 -5.45325
+E147 0.00000 -9.48329 -3.84204
+E148 0.91885 -9.62744 -2.21054
+E149 2.20152 -9.70916 -0.63755
+E150 3.13323 -9.13629 0.81878
+E151 4.29098 -8.11323 2.38442
+E152 5.10822 -6.74936 3.92134
+E153 5.53051 -5.46184 5.50189
+E154 5.65276 -3.84604 6.52108
+E155 5.40372 -1.61247 7.47343
+E156 1.06111 -9.53369 -5.45325
+E157 2.03205 -9.56166 -3.44989
+E158 3.02717 -9.45363 -1.95941
+E159 4.13620 -8.61230 -1.04503
+E160 5.23709 -7.57398 0.46627
+E161 5.96851 -6.52864 2.03293
+E162 6.48817 -5.15829 3.47294
+E163 6.86934 -3.79448 4.89401
+E164 6.81554 -1.94522 5.93053
+E165 1.88533 -9.22031 -6.79889
+E166 2.71528 -8.94646 -5.55376
+E167 3.62234 -8.59338 -4.04243
+E168 5.27282 -7.46584 -2.87485
+E169 5.98418 -6.74733 -1.40314
+E170 6.67571 -5.73608 0.10234
+E171 7.15214 -4.71132 1.51762
+E172 7.46191 -3.49308 2.95937
+E173 7.56445 -2.31141 4.30327
+E174 3.45625 -8.57317 -6.82654
+E175 4.20415 -7.81153 -5.84368
+E176 5.08285 -7.32643 -4.32109
+E177 6.31441 -6.01299 -3.25921
+E178 6.99052 -5.01694 -1.88810
+E179 7.52183 -3.70044 -0.51432
+E180 7.79279 -2.73175 1.10033
+E181 8.05947 -1.50296 2.76753
+E182 7.60767 -0.56840 4.59939
+E183 6.70292 0.06004 6.23992
+E184 5.31389 0.60081 7.48811
+E185 3.53746 1.07133 8.47419
+E186 1.15339 1.51369 9.19904
+E187 4.70608 -7.21970 -7.52955
+E188 5.38718 -6.45180 -6.16689
+E189 6.09726 -5.61090 -4.67894
+E190 7.03346 -4.45090 -3.54895
+E191 7.37920 -3.49709 -2.18347
+E192 7.76752 -1.84131 -0.92719
+E193 7.98893 -0.75212 0.84194
+E194 8.06621 0.40109 2.78565
+E195 7.36570 1.34368 4.60382
+E196 6.11380 1.94213 6.23844
+E197 4.64127 2.57224 7.50868
+E198 2.29559 2.91372 8.55810
+E199 5.63580 -5.80367 -7.74857
+E200 6.24349 -4.62250 -6.49623
+E201 6.78379 -4.01784 -5.01755
+E202 7.93758 0.07220 -0.96992
+E203 7.96921 1.20744 0.97332
+E204 7.62423 2.30205 2.81799
+E205 6.86564 3.16240 4.76800
+E206 5.33943 3.80220 6.32664
+E207 3.34467 4.40891 7.67253
+E208 6.36989 -3.82470 -8.20622
+E209 6.74047 -2.84840 -6.74712
+E210 7.74468 1.05291 -2.47059
+E211 7.77059 2.06323 -0.80729
+E212 7.66385 3.29619 1.04415
+E213 7.10064 4.23342 2.91874
+E214 6.01447 4.93908 4.85771
+E215 4.49828 5.59395 6.28801
+E216 6.82585 -1.86426 -8.69399
+E217 7.12806 -0.49186 -7.34929
+E218 7.52646 0.73096 -5.96025
+E219 7.69315 1.74041 -4.18153
+E220 7.54098 3.05323 -2.51935
+E221 7.31613 4.37155 -0.61128
+E222 6.97545 5.35131 1.30741
+E223 6.16984 6.11292 3.29612
+E224 5.10466 6.41586 4.77815
+E225 7.62652 3.24782 -4.40493
+E226 7.24346 4.80120 -4.77214
+E227 7.55603 2.52648 -6.26962
+E228 7.38028 1.35743 -7.84943
+E229 6.86103 -0.14155 -9.14913
+E230 6.74159 5.99080 -5.83258
+E231 7.22458 4.14855 -6.88918
+E232 7.31422 3.19647 -8.44268
+E233 7.09051 1.66694 -9.77213
+E234 5.88750 7.22674 -6.54736
+E235 6.65934 5.64059 -7.65729
+E236 6.75138 4.62427 -9.03070
+E237 6.58044 3.33743 -10.39707
+E238 4.69146 8.22723 -6.78260
+E239 5.81346 6.42065 -8.65026
+E240 6.04363 5.37051 -9.81363
+E241 -4.69146 8.22723 -6.78260
+E242 -5.81346 6.42065 -8.65026
+E243 -6.04363 5.37051 -9.81363
+E244 -5.88750 7.22674 -6.54736
+E245 -6.65934 5.64059 -7.65729
+E246 -6.75138 4.62427 -9.03070
+E247 -6.58044 3.33743 -10.39707
+E248 -6.74159 5.99080 -5.83258
+E249 -7.22458 4.14855 -6.88918
+E250 -7.31422 3.19647 -8.44268
+E251 -7.09051 1.66694 -9.77213
+E252 -7.24346 4.80120 -4.77214
+E253 -7.62652 3.24782 -4.40493
+E254 -7.55603 2.52648 -6.26962
+E255 -7.38028 1.35743 -7.84943
+E256 -6.86103 -0.14155 -9.14913
diff --git a/mne/channels/data/montages/GSN-HydroCel-257.sfp b/mne/channels/data/montages/GSN-HydroCel-257.sfp
new file mode 100644
index 0000000..98c6b1a
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-257.sfp
@@ -0,0 +1,260 @@
+FidNz 0.00000 10.56381 -2.05108
+FidT9 -7.82694 0.45386 -3.76056
+FidT10 7.82694 0.45386 -3.76056
+E1 6.96223 5.38242 -2.19061
+E2 6.48414 6.40424 -0.14004
+E3 5.69945 7.20796 1.79088
+E4 4.81093 7.77321 3.65006
+E5 3.61962 7.47782 5.50947
+E6 2.25278 6.46157 6.96317
+E7 1.18879 5.21755 8.13378
+E8 0.00000 3.59608 8.75111
+E9 -1.15339 1.51369 9.19904
+E10 5.94022 7.38337 -1.51513
+E11 5.07624 8.37264 0.40595
+E12 3.87946 9.03611 2.51559
+E13 2.60756 8.97868 4.39107
+E14 1.23344 8.11574 6.06161
+E15 0.00000 6.81181 7.28186
+E16 -1.18879 5.21755 8.13378
+E17 -2.29559 2.91372 8.55810
+E18 4.06489 9.40559 -0.89098
+E19 2.86784 10.01456 0.85212
+E20 1.42153 10.06322 2.84803
+E21 0.00000 9.40339 4.65829
+E22 -1.23344 8.11574 6.06161
+E23 -2.25278 6.46157 6.96317
+E24 -3.34467 4.40891 7.67253
+E25 1.39547 10.65281 -0.61138
+E26 0.00000 10.68996 1.00542
+E27 -1.42153 10.06322 2.84803
+E28 -2.60756 8.97868 4.39107
+E29 -3.61962 7.47782 5.50947
+E30 -4.49828 5.59395 6.28801
+E31 0.00000 10.56381 -2.05108
+E32 -1.39547 10.65281 -0.61138
+E33 -2.86784 10.01456 0.85212
+E34 -3.87946 9.03611 2.51559
+E35 -4.81093 7.77321 3.65006
+E36 -5.10466 6.41586 4.77815
+E37 -4.06489 9.40559 -0.89098
+E38 -5.07624 8.37264 0.40595
+E39 -5.69945 7.20796 1.79088
+E40 -6.16984 6.11292 3.29612
+E41 -6.01447 4.93908 4.85771
+E42 -5.33943 3.80220 6.32664
+E43 -4.64127 2.57224 7.50868
+E44 -3.53746 1.07133 8.47419
+E45 -1.99458 -0.60998 9.28870
+E46 -5.94022 7.38337 -1.51513
+E47 -6.48414 6.40424 -0.14004
+E48 -6.97545 5.35131 1.30741
+E49 -7.10064 4.23342 2.91874
+E50 -6.86564 3.16240 4.76800
+E51 -6.11380 1.94213 6.23844
+E52 -5.31389 0.60081 7.48811
+E53 -3.72368 -1.14573 8.58697
+E54 -6.96223 5.38242 -2.19061
+E55 -7.31613 4.37155 -0.61128
+E56 -7.66385 3.29619 1.04415
+E57 -7.62423 2.30205 2.81799
+E58 -7.36570 1.34368 4.60382
+E59 -6.70292 0.06004 6.23992
+E60 -5.40372 -1.61247 7.47343
+E61 -7.54098 3.05323 -2.51935
+E62 -7.77059 2.06323 -0.80729
+E63 -7.96921 1.20744 0.97332
+E64 -8.06621 0.40109 2.78565
+E65 -7.60767 -0.56840 4.59939
+E66 -6.81554 -1.94522 5.93053
+E67 -7.69315 1.74041 -4.18153
+E68 -7.74468 1.05291 -2.47059
+E69 -7.93758 0.07220 -0.96992
+E70 -7.98893 -0.75212 0.84194
+E71 -8.05947 -1.50296 2.76753
+E72 -7.56445 -2.31141 4.30327
+E73 -7.52646 0.73096 -5.96025
+E74 -7.76752 -1.84131 -0.92719
+E75 -7.79279 -2.73175 1.10033
+E76 -7.46191 -3.49308 2.95937
+E77 -6.86934 -3.79448 4.89401
+E78 -5.65276 -3.84604 6.52108
+E79 -4.12465 -3.54800 7.95405
+E80 -2.23647 -2.95809 8.92461
+E81 0.00000 -1.93834 9.45867
+E82 -7.12806 -0.49186 -7.34929
+E83 -7.37920 -3.49709 -2.18347
+E84 -7.52183 -3.70044 -0.51432
+E85 -7.15214 -4.71132 1.51762
+E86 -6.48817 -5.15829 3.47294
+E87 -5.53051 -5.46184 5.50189
+E88 -4.03809 -5.23807 7.04455
+E89 -2.29514 -4.87829 8.27223
+E90 0.00000 -3.74195 9.02791
+E91 -6.82585 -1.86426 -8.69399
+E92 -6.74047 -2.84840 -6.74712
+E93 -6.78379 -4.01784 -5.01755
+E94 -7.03346 -4.45090 -3.54895
+E95 -6.99052 -5.01694 -1.88810
+E96 -6.67571 -5.73608 0.10234
+E97 -5.96851 -6.52864 2.03293
+E98 -5.10822 -6.74936 3.92134
+E99 -3.75216 -6.67734 5.63719
+E100 -2.14874 -6.29190 7.11453
+E101 0.00000 -7.15042 6.95434
+E102 -6.36989 -3.82470 -8.20622
+E103 -6.24349 -4.62250 -6.49623
+E104 -6.09726 -5.61090 -4.67894
+E105 -6.31441 -6.01299 -3.25921
+E106 -5.98418 -6.74733 -1.40314
+E107 -5.23709 -7.57398 0.46627
+E108 -4.29098 -8.11323 2.38442
+E109 -3.24277 -8.15293 4.22025
+E110 -1.73181 -7.63850 5.69360
+E111 -5.63580 -5.80367 -7.74857
+E112 -5.38718 -6.45180 -6.16689
+E113 -5.08285 -7.32643 -4.32109
+E114 -5.27282 -7.46584 -2.87485
+E115 -4.13620 -8.61230 -1.04503
+E116 -3.13323 -9.13629 0.81878
+E117 -1.94503 -9.23415 2.62135
+E118 -1.09312 -8.74110 4.13810
+E119 0.00000 -8.09146 5.34087
+E120 -4.70608 -7.21970 -7.52955
+E121 -4.20415 -7.81153 -5.84368
+E122 -3.62234 -8.59338 -4.04243
+E123 -3.02717 -9.45363 -1.95941
+E124 -2.20152 -9.70916 -0.63755
+E125 -1.01682 -9.71656 0.95467
+E126 0.00000 -9.23206 2.54671
+E127 1.09312 -8.74110 4.13810
+E128 1.73181 -7.63850 5.69360
+E129 2.14874 -6.29190 7.11453
+E130 2.29514 -4.87829 8.27223
+E131 2.23647 -2.95809 8.92461
+E132 1.99458 -0.60998 9.28870
+E133 -3.45625 -8.57317 -6.82654
+E134 -2.71528 -8.94646 -5.55376
+E135 -2.03205 -9.56166 -3.44989
+E136 -0.91885 -9.62744 -2.21054
+E137 0.00000 -9.58535 -0.88629
+E138 1.01682 -9.71656 0.95467
+E139 1.94503 -9.23415 2.62135
+E140 3.24277 -8.15293 4.22025
+E141 3.75216 -6.67734 5.63719
+E142 4.03809 -5.23807 7.04455
+E143 4.12465 -3.54800 7.95405
+E144 3.72368 -1.14573 8.58697
+E145 -1.88533 -9.22031 -6.79889
+E146 -1.06111 -9.53369 -5.45325
+E147 0.00000 -9.48329 -3.84204
+E148 0.91885 -9.62744 -2.21054
+E149 2.20152 -9.70916 -0.63755
+E150 3.13323 -9.13629 0.81878
+E151 4.29098 -8.11323 2.38442
+E152 5.10822 -6.74936 3.92134
+E153 5.53051 -5.46184 5.50189
+E154 5.65276 -3.84604 6.52108
+E155 5.40372 -1.61247 7.47343
+E156 1.06111 -9.53369 -5.45325
+E157 2.03205 -9.56166 -3.44989
+E158 3.02717 -9.45363 -1.95941
+E159 4.13620 -8.61230 -1.04503
+E160 5.23709 -7.57398 0.46627
+E161 5.96851 -6.52864 2.03293
+E162 6.48817 -5.15829 3.47294
+E163 6.86934 -3.79448 4.89401
+E164 6.81554 -1.94522 5.93053
+E165 1.88533 -9.22031 -6.79889
+E166 2.71528 -8.94646 -5.55376
+E167 3.62234 -8.59338 -4.04243
+E168 5.27282 -7.46584 -2.87485
+E169 5.98418 -6.74733 -1.40314
+E170 6.67571 -5.73608 0.10234
+E171 7.15214 -4.71132 1.51762
+E172 7.46191 -3.49308 2.95937
+E173 7.56445 -2.31141 4.30327
+E174 3.45625 -8.57317 -6.82654
+E175 4.20415 -7.81153 -5.84368
+E176 5.08285 -7.32643 -4.32109
+E177 6.31441 -6.01299 -3.25921
+E178 6.99052 -5.01694 -1.88810
+E179 7.52183 -3.70044 -0.51432
+E180 7.79279 -2.73175 1.10033
+E181 8.05947 -1.50296 2.76753
+E182 7.60767 -0.56840 4.59939
+E183 6.70292 0.06004 6.23992
+E184 5.31389 0.60081 7.48811
+E185 3.53746 1.07133 8.47419
+E186 1.15339 1.51369 9.19904
+E187 4.70608 -7.21970 -7.52955
+E188 5.38718 -6.45180 -6.16689
+E189 6.09726 -5.61090 -4.67894
+E190 7.03346 -4.45090 -3.54895
+E191 7.37920 -3.49709 -2.18347
+E192 7.76752 -1.84131 -0.92719
+E193 7.98893 -0.75212 0.84194
+E194 8.06621 0.40109 2.78565
+E195 7.36570 1.34368 4.60382
+E196 6.11380 1.94213 6.23844
+E197 4.64127 2.57224 7.50868
+E198 2.29559 2.91372 8.55810
+E199 5.63580 -5.80367 -7.74857
+E200 6.24349 -4.62250 -6.49623
+E201 6.78379 -4.01784 -5.01755
+E202 7.93758 0.07220 -0.96992
+E203 7.96921 1.20744 0.97332
+E204 7.62423 2.30205 2.81799
+E205 6.86564 3.16240 4.76800
+E206 5.33943 3.80220 6.32664
+E207 3.34467 4.40891 7.67253
+E208 6.36989 -3.82470 -8.20622
+E209 6.74047 -2.84840 -6.74712
+E210 7.74468 1.05291 -2.47059
+E211 7.77059 2.06323 -0.80729
+E212 7.66385 3.29619 1.04415
+E213 7.10064 4.23342 2.91874
+E214 6.01447 4.93908 4.85771
+E215 4.49828 5.59395 6.28801
+E216 6.82585 -1.86426 -8.69399
+E217 7.12806 -0.49186 -7.34929
+E218 7.52646 0.73096 -5.96025
+E219 7.69315 1.74041 -4.18153
+E220 7.54098 3.05323 -2.51935
+E221 7.31613 4.37155 -0.61128
+E222 6.97545 5.35131 1.30741
+E223 6.16984 6.11292 3.29612
+E224 5.10466 6.41586 4.77815
+E225 7.62652 3.24782 -4.40493
+E226 7.24346 4.80120 -4.77214
+E227 7.55603 2.52648 -6.26962
+E228 7.38028 1.35743 -7.84943
+E229 6.86103 -0.14155 -9.14913
+E230 6.74159 5.99080 -5.83258
+E231 7.22458 4.14855 -6.88918
+E232 7.31422 3.19647 -8.44268
+E233 7.09051 1.66694 -9.77213
+E234 5.88750 7.22674 -6.54736
+E235 6.65934 5.64059 -7.65729
+E236 6.75138 4.62427 -9.03070
+E237 6.58044 3.33743 -10.39707
+E238 4.69146 8.22723 -6.78260
+E239 5.81346 6.42065 -8.65026
+E240 6.04363 5.37051 -9.81363
+E241 -4.69146 8.22723 -6.78260
+E242 -5.81346 6.42065 -8.65026
+E243 -6.04363 5.37051 -9.81363
+E244 -5.88750 7.22674 -6.54736
+E245 -6.65934 5.64059 -7.65729
+E246 -6.75138 4.62427 -9.03070
+E247 -6.58044 3.33743 -10.39707
+E248 -6.74159 5.99080 -5.83258
+E249 -7.22458 4.14855 -6.88918
+E250 -7.31422 3.19647 -8.44268
+E251 -7.09051 1.66694 -9.77213
+E252 -7.24346 4.80120 -4.77214
+E253 -7.62652 3.24782 -4.40493
+E254 -7.55603 2.52648 -6.26962
+E255 -7.38028 1.35743 -7.84943
+E256 -6.86103 -0.14155 -9.14913
+Cz 0.00000 0.00000 9.68308
diff --git a/mne/channels/data/montages/GSN-HydroCel-32.sfp b/mne/channels/data/montages/GSN-HydroCel-32.sfp
new file mode 100644
index 0000000..214fb1b
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-32.sfp
@@ -0,0 +1,36 @@
+FidNz	0	9.071585155	-2.359754454
+FidT9	-6.711765	0.040402876	-3.251600355
+FidT10	6.711765	0.040402876	-3.251600355
+E1	-2.695405558	8.884820317	1.088308144
+E2	2.695405558	8.884820317	1.088308144
+E3	-4.459387187	6.021159964	4.365321482
+E4	4.459387187	6.021159964	4.365321482
+E5	-5.47913021	0.284948655	6.38332782
+E6	5.47913021	0.284948655	6.38332782
+E7	-5.831241498	-4.494821698	4.955347697
+E8	5.831241498	-4.494821698	4.955347697
+E9	-2.738838019	-8.607966849	0.239368223
+E10	2.738838019	-8.607966849	0.239368223
+E11	-6.399087198	4.127248875	-0.356852241
+E12	6.399087198	4.127248875	-0.356852241
+E13	-7.304625099	-1.866238006	-0.629182006
+E14	7.304625099	-1.866238006	-0.629182006
+E15	-6.034746843	-5.755782196	0.051843011
+E16	6.034746843	-5.755782196	0.051843011
+E17	0	7.96264703	5.044718001
+E18	0	9.271139705	-2.211516434
+E19	0	-6.676694032	6.465208258
+E20	0	-8.996686498	0.487952047
+E21	-6.518995129	2.417299399	-5.253637073
+E22	6.518995129	2.417299399	-5.253637073
+E23	-6.174969392	-2.458138877	-5.637380998
+E24	6.174969392	-2.458138877	-5.637380998
+E25	-3.784983913	-6.401014415	-5.260040689
+E26	3.784983913	-6.401014415	-5.260040689
+E27	0	9.087440894	1.333345013
+E28	0	3.806770224	7.891304964
+E29	-3.743504949	6.649204911	-6.530243068
+E30	3.743504949	6.649204911	-6.530243068
+E31	-6.118458137	4.523870113	-4.409174427
+E32	6.118458137	4.523870113	-4.409174427
+Cz	0	0	8.899186843
diff --git a/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp b/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp
new file mode 100644
index 0000000..004dcb0
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp
@@ -0,0 +1,67 @@
+FidNz      0.00000      10.3556     -2.69376
+FidT9     -7.18083    0.0461216     -3.71184
+FidT10      6.24270    0.0461216     -3.71184
+E1      6.60688      6.30230     -2.94229
+E2      4.41106      8.71481      3.50199
+E3      3.27490      8.15713      5.69580
+E4      0.00000      4.34559      9.00826
+E5      3.07692      10.1424      1.24235
+E6      0.00000      9.08970      5.75876
+E7     -2.78065      3.71493      8.68573
+E8      0.00000      10.3612      3.54499
+E9     -3.2749      8.15713      5.6958
+E10     -3.07692      10.1424      1.24235
+E11     -4.41106      8.71481      3.50199
+E12     -5.09058      6.87341      4.98320
+E13     -6.48687      6.22527      3.23806
+E14     -6.33176      4.74636      5.28262
+E15     -5.43625      3.07969      7.18905
+E16     -4.21856      1.09635      8.70749
+E17     -6.60688      6.30230     -2.94229
+E18     -7.30483      4.71143    -0.407362
+E19     -7.78984      3.38858      2.77404
+E20     -6.25466     0.325281      7.28684
+E21     -4.46332     -1.73406      8.86309
+E22     -7.88241    -0.914323      5.25116
+E23     -7.80897      1.45945     -4.05862
+E24     -8.33854     -2.13039    -0.718238
+E25     -8.34755     -2.62392      2.72292
+E26     -7.69093     -3.43812      4.76981
+E27     -7.48627     -5.32762      3.13923
+E28     -6.65661     -5.13103      5.65674
+E29     -7.51185     -4.26886     -3.41445
+E30     -6.88892     -6.57047    0.0591810
+E31     -4.69965     -6.91953      6.12524
+E32     -6.16900     -6.70120     -3.30093
+E33     -2.10574     -8.39538      5.96342
+E34      0.00000     -4.98271      9.28085
+E35     -3.12650     -9.82636     0.273249
+E36      0.00000     -8.93816      5.35112
+E37      0.00000     -10.2701     0.557018
+E38      2.10574     -8.39538      5.96342
+E39      3.12650     -9.82636     0.273249
+E40      4.69965     -6.91953      6.12524
+E41      4.46332     -1.73406      8.86309
+E42      6.65661     -5.13103      5.65674
+E43      6.16900     -6.70120     -3.30093
+E44      6.88892     -6.57047    0.0591810
+E45      7.48627     -5.32762      3.13923
+E46      7.69093     -3.43812      4.76981
+E47      7.51185     -4.26886     -3.41445
+E48      8.34755     -2.62392      2.72292
+E49      7.88241    -0.914323      5.25116
+E50      6.25466     0.325281      7.28684
+E51      4.21856      1.09635      8.70749
+E52      8.33854     -2.13039    -0.718238
+E53      5.43625      3.07969      7.18905
+E54      2.78065      3.71493      8.68573
+E55      7.80897      1.45945     -4.05862
+E56      7.78984      3.38858      2.77404
+E57      6.33176      4.74636      5.28262
+E58      7.30483      4.71143    -0.407362
+E59      6.48687      6.22527      3.23806
+E60      5.09058      6.87341      4.98320
+E61      6.98448      5.16419     -5.03326
+E62      4.27337      7.59035     -7.45455
+E63     -4.27337      7.59035     -7.45455
+E64     -6.98448      5.16419     -5.03326
diff --git a/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp b/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp
new file mode 100644
index 0000000..c1c455d
--- /dev/null
+++ b/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp
@@ -0,0 +1,68 @@
+FidNz      0.00000      10.3556     -2.69376
+FidT9     -7.18083    0.0461216     -3.71184
+FidT10      6.24270    0.0461216     -3.71184
+E1      6.60688      6.30230     -2.94229
+E2      4.41106      8.71481      3.50199
+E3      3.27490      8.15713      5.69580
+E4      0.00000      4.34559      9.00826
+E5      3.07692      10.1424      1.24235
+E6      0.00000      9.08970      5.75876
+E7     -2.78065      3.71493      8.68573
+E8      0.00000      10.3612      3.54499
+E9     -3.2749      8.15713      5.6958
+E10     -3.07692      10.1424      1.24235
+E11     -4.41106      8.71481      3.50199
+E12     -5.09058      6.87341      4.98320
+E13     -6.48687      6.22527      3.23806
+E14     -6.33176      4.74636      5.28262
+E15     -5.43625      3.07969      7.18905
+E16     -4.21856      1.09635      8.70749
+E17     -6.60688      6.30230     -2.94229
+E18     -7.30483      4.71143    -0.407362
+E19     -7.78984      3.38858      2.77404
+E20     -6.25466     0.325281      7.28684
+E21     -4.46332     -1.73406      8.86309
+E22     -7.88241    -0.914323      5.25116
+E23     -7.80897      1.45945     -4.05862
+E24     -8.33854     -2.13039    -0.718238
+E25     -8.34755     -2.62392      2.72292
+E26     -7.69093     -3.43812      4.76981
+E27     -7.48627     -5.32762      3.13923
+E28     -6.65661     -5.13103      5.65674
+E29     -7.51185     -4.26886     -3.41445
+E30     -6.88892     -6.57047    0.0591810
+E31     -4.69965     -6.91953      6.12524
+E32     -6.16900     -6.70120     -3.30093
+E33     -2.10574     -8.39538      5.96342
+E34      0.00000     -4.98271      9.28085
+E35     -3.12650     -9.82636     0.273249
+E36      0.00000     -8.93816      5.35112
+E37      0.00000     -10.2701     0.557018
+E38      2.10574     -8.39538      5.96342
+E39      3.12650     -9.82636     0.273249
+E40      4.69965     -6.91953      6.12524
+E41      4.46332     -1.73406      8.86309
+E42      6.65661     -5.13103      5.65674
+E43      6.16900     -6.70120     -3.30093
+E44      6.88892     -6.57047    0.0591810
+E45      7.48627     -5.32762      3.13923
+E46      7.69093     -3.43812      4.76981
+E47      7.51185     -4.26886     -3.41445
+E48      8.34755     -2.62392      2.72292
+E49      7.88241    -0.914323      5.25116
+E50      6.25466     0.325281      7.28684
+E51      4.21856      1.09635      8.70749
+E52      8.33854     -2.13039    -0.718238
+E53      5.43625      3.07969      7.18905
+E54      2.78065      3.71493      8.68573
+E55      7.80897      1.45945     -4.05862
+E56      7.78984      3.38858      2.77404
+E57      6.33176      4.74636      5.28262
+E58      7.30483      4.71143    -0.407362
+E59      6.48687      6.22527      3.23806
+E60      5.09058      6.87341      4.98320
+E61      6.98448      5.16419     -5.03326
+E62      4.27337      7.59035     -7.45455
+E63     -4.27337      7.59035     -7.45455
+E64     -6.98448      5.16419     -5.03326
+Cz      0.00000      0.00000      10.1588
diff --git a/mne/channels/data/montages/biosemi128.txt b/mne/channels/data/montages/biosemi128.txt
new file mode 100644
index 0000000..69739c6
--- /dev/null
+++ b/mne/channels/data/montages/biosemi128.txt
@@ -0,0 +1,132 @@
+Site	 Theta	Phi
+A1	0	0
+A2	11.5	-90
+A3	23	-90
+A4	34.5	-90
+A5	-46	67.5
+A6	-46	45
+A7	-57.5	45
+A8	-69	54
+A9	-80.5	54
+A10	-92	54
+A11	-103.5	54
+A12	-115	54
+A13	-115	72
+A14	-103.5	72
+A15	-92	72
+A16	-80.5	72
+A17	-69	72
+A18	-57.5	67.5
+A19	46	-90
+A20	57.5	-90
+A21	69	-90
+A22	80.5	-90
+A23	92	-90
+A24	103.5	-90
+A25	115	-90
+A26	115	-72
+A27	103.5	-72
+A28	92	-72
+A29	80.5	-72
+A30	69	-72
+A31	57.5	-67.5
+A32	46	-67.5
+B1	11.5	-18
+B2	23	-45
+B3	46	-45
+B4	57.5	-45
+B5	69	-54
+B6	80.5	-54
+B7	92	-54
+B8	103.5	-54
+B9	115	-54
+B10	103.5	-36
+B11	92	-36
+B12	80.5	-36
+B13	69	-36
+B14	92	-18
+B15	80.5	-18
+B16	69	-18
+B17	57.5	-22.5
+B18	46	-22.5
+B19	34.5	-30
+B20	23	0
+B21	34.5	0
+B22	46	0
+B23	57.5	0
+B24	69	0
+B25	80.5	0
+B26	92	0
+B27	92	18
+B28	80.5	18
+B29	69	18
+B30	57.5	22.5
+B31	46	22.5
+B32	34.5	30
+C1	11.5	54
+C2	23	45
+C3	46	45
+C4	57.5	45
+C5	69	36
+C6	80.5	36
+C7	92	36
+C8	92	54
+C9	80.5	54
+C10	69	54
+C11	34.5	60
+C12	46	67.5
+C13	57.5	67.5
+C14	69	72
+C15	80.5	72
+C16	92	72
+C17	92	90
+C18	80.5	90
+C19	69	90
+C20	57.5	90
+C21	46	90
+C22	34.5	90
+C23	23	90
+C24	-34.5	-60
+C25	-46	-67.5
+C26	-57.5	-67.5
+C27	-69	-72
+C28	-80.5	-72
+C29	-92	-72
+C30	-92	-54
+C31	-80.5	-54
+C32	-69	-54
+D1	-11.5	-54
+D2	-23	-45
+D3	-46	-45
+D4	-57.5	-45
+D5	-69	-36
+D6	-80.5	-36
+D7	-92	-36
+D8	-92	-18
+D9	-80.5	-18
+D10	-69	-18
+D11	-57.5	-22.5
+D12	-46	-22.5
+D13	-34.5	-30
+D14	-23	0
+D15	-11.5	18
+D16	-23	45
+D17	-34.5	30
+D18	-34.5	0
+D19	-46	0
+D20	-57.5	0
+D21	-69	0
+D22	-80.5	0
+D23	-92	0
+D24	-92	18
+D25	-80.5	18
+D26	-69	18
+D27	-57.5	22.5
+D28	-46	22.5
+D29	-69	36
+D30	-80.5	36
+D31	-92	36
+D32	-103.5	36
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
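
[Editorial note] Unlike the .sfp files, the biosemi*.txt and easycap*.txt montages in this commit store spherical angles: Theta reads as the inclination from the vertex (Cz), with its sign selecting the hemisphere, and Phi as the azimuth, both in degrees. That convention is inferred from the values themselves (Cz at 0/0, T7 at -92/0, Fpz at 92/90) rather than documented in the files. A sketch of the conversion to Cartesian coordinates under that assumed convention, with a nominal head radius:

    import numpy as np

    def sph_to_cart(theta_deg, phi_deg, radius=0.085):
        # Inclination theta (signed by hemisphere) and azimuth phi,
        # in degrees, to Cartesian meters; the 8.5 cm radius is a
        # nominal illustration value, not taken from the files.
        theta, phi = np.deg2rad(theta_deg), np.deg2rad(phi_deg)
        return (radius * np.sin(theta) * np.cos(phi),
                radius * np.sin(theta) * np.sin(phi),
                radius * np.cos(theta))

    # Sanity checks against rows above: Cz (0, 0) -> (0, 0, +r) at the
    # vertex; Oz (92, -90) -> y ~ -r (occipital); LPA (-115, 0) -> x < 0.
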
diff --git a/mne/channels/data/montages/biosemi16.txt b/mne/channels/data/montages/biosemi16.txt
new file mode 100644
index 0000000..d8a6769
--- /dev/null
+++ b/mne/channels/data/montages/biosemi16.txt
@@ -0,0 +1,20 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+Fp2	  92	 72
+F4	  60	 51
+Fz	  46	 90
+F3	 -60	-51
+T7	 -92	  0
+C3	 -46	  0
+Cz	   0	  0
+C4	  46	  0
+T8	  92	  0
+P4	  60	-51
+Pz	  46	-90
+P3	 -60	 51
+O1	 -92	 72
+Oz	  92	-90
+O2	  92	-72
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/mne/channels/data/montages/biosemi160.txt b/mne/channels/data/montages/biosemi160.txt
new file mode 100644
index 0000000..04fefc7
--- /dev/null
+++ b/mne/channels/data/montages/biosemi160.txt
@@ -0,0 +1,164 @@
+Site	 Theta	Phi
+A1	0	0
+A2	11.5	-90
+A3	23	-90
+A4	34.5	-90
+A5	-46	72
+A6	-46	54
+A7	-57.5	54
+A8	-69	60
+A9	-80.5	60
+A10	-92	60
+A11	-103.5	60
+A12	-115	60
+A13	-115	75
+A14	-103.5	75
+A15	-92	75
+A16	-80.5	75
+A17	-69	75
+A18	-57.5	72
+A19	46	-90
+A20	57.5	-90
+A21	69	-90
+A22	80.5	-90
+A23	92	-90
+A24	103.5	-90
+A25	115	-90
+A26	115	-75
+A27	103.5	-75
+A28	92	-75
+A29	80.5	-75
+A30	69	-75
+A31	57.5	-72
+A32	46	-72
+B1	11.5	-18
+B2	23	-60
+B3	46	-54
+B4	57.5	-54
+B5	69	-60
+B6	80.5	-60
+B7	92	-60
+B8	103.5	-60
+B9	115	-60
+B10	115	-45
+B11	103.5	-45
+B12	92	-45
+B13	80.5	-45
+B14	69	-45
+B15	69	-30
+B16	80.5	-30
+B17	92	-30
+B18	103.5	-30
+B19	92	-15
+B20	80.5	-15
+B21	69	-15
+B22	57.5	-36
+B23	46	-36
+B24	34.5	-45
+B25	23	-30
+B26	34.5	-22.5
+B27	46	-18
+B28	57.5	-18
+B29	57.5	0
+B30	69	0
+B31	80.5	0
+B32	92	0
+C1	11.5	54
+C2	23	30
+C3	23	0
+C4	34.5	0
+C5	34.5	22.5
+C6	46	18
+C7	46	0
+C8	57.5	18
+C9	69	15
+C10	80.5	15
+C11	92	15
+C12	92	30
+C13	80.5	30
+C14	69	30
+C15	69	45
+C16	80.5	45
+C17	92	45
+C18	92	60
+C19	80.5	60
+C20	69	60
+C21	57.5	54
+C22	57.5	36
+C23	46	36
+C24	34.5	45
+C25	23	60
+C26	34.5	67.5
+C27	46	54
+C28	46	72
+C29	57.5	72
+C30	69	75
+C31	80.5	75
+C32	92	75
+D1	-11.5	-54
+D2	23	90
+D3	34.5	90
+D4	46	90
+D5	57.5	90
+D6	69	90
+D7	80.5	90
+D8	92	90
+D9	-92	-75
+D10	-80.5	-75
+D11	-69	-75
+D12	-57.5	-72
+D13	-46	-72
+D14	-34.5	-67.5
+D15	-23	-60
+D16	-23	-30
+D17	-34.5	-45
+D18	-46	-54
+D19	-57.5	-54
+D20	-69	-60
+D21	-80.5	-60
+D22	-92	-60
+D23	-92	-45
+D24	-80.5	-45
+D25	-69	-45
+D26	-57.5	-36
+D27	-46	-36
+D28	-34.5	-22.5
+D29	-46	-18
+D30	-69	-30
+D31	-80.5	-30
+D32	-92	-30
+E1	-11.5	18
+E2	-23	0
+E3	-34.5	0
+E4	-46	0
+E5	-57.5	-18
+E6	-69	-15
+E7	-80.5	-15
+E8	-92	-15
+E9	-92	0
+E10	-80.5	0
+E11	-69	0
+E12	-57.5	0
+E13	-57.5	18
+E14	-69	15
+E15	-80.5	15
+E16	-92	15
+E17	-103.5	30
+E18	-92	30
+E19	-80.5	30
+E20	-69	30
+E21	-46	18
+E22	-34.5	22.5
+E23	-23	30
+E24	-23	60
+E25	-34.5	45
+E26	-46	36
+E27	-57.5	36
+E28	-69	45
+E29	-80.5	45
+E30	-92	45
+E31	-103.5	45
+E32	-115	45
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/mne/channels/data/montages/biosemi256.txt b/mne/channels/data/montages/biosemi256.txt
new file mode 100644
index 0000000..50085a2
--- /dev/null
+++ b/mne/channels/data/montages/biosemi256.txt
@@ -0,0 +1,260 @@
+Site	 Theta	Phi
+A1	0	0
+A2	9.2	-90
+A3	18.4	-90
+A4	27.6	-90
+A5	36.8	-90
+A6	46	-90
+A7	-46	75
+A8	-55.2	75
+A9	-64.4	78
+A10	-73.6	78
+A11	-82.8	78.75
+A12	-92	78.75
+A13	-101.2	78.75
+A14	-110.4	78
+A15	-119.6	78
+A16	119.6	-90
+A17	110.4	-90
+A18	101.2	-90
+A19	92	-90
+A20	82.8	-90
+A21	73.6	-90
+A22	64.4	-90
+A23	55.2	-90
+A24	46	-75
+A25	55.2	-75
+A26	64.4	-78
+A27	73.6	-78
+A28	82.8	-78.75
+A29	92	-78.75
+A30	101.2	-78.75
+A31	110.4	-78
+A32	119.6	-78
+B1	18.4	-54
+B2	27.6	-66
+B3	36.8	-54
+B4	46	-60
+B5	55.2	-60
+B6	64.4	-66
+B7	73.6	-66
+B8	82.8	-67.5
+B9	92	-67.5
+B10	101.2	-67.5
+B11	110.4	-66
+B12	119.6	-66
+B13	110.4	-54
+B14	101.2	-56.25
+B15	92	-56.25
+B16	82.8	-56.25
+B17	73.6	-54
+B18	64.4	-54
+B19	55.2	-45
+B20	46	-45
+B21	27.6	-42
+B22	36.8	-36
+B23	46	-30
+B24	55.2	-30
+B25	64.4	-42
+B26	73.6	-42
+B27	82.8	-45
+B28	92	-45
+B29	101.2	-45
+B30	110.4	-42
+B31	110.4	-30
+B32	101.2	-33.75
+C1	9.2	-18
+C2	18.4	-18
+C3	27.6	-18
+C4	36.8	-18
+C5	46	-15
+C6	55.2	-15
+C7	64.4	-18
+C8	64.4	-30
+C9	73.6	-30
+C10	82.8	-33.75
+C11	92	-33.75
+C12	101.2	-22.5
+C13	92	-22.5
+C14	82.8	-22.5
+C15	73.6	-18
+C16	82.8	-11.25
+C17	92	-11.25
+C18	92	0
+C19	82.8	0
+C20	73.6	-6
+C21	64.4	-6
+C22	55.2	0
+C23	46	0
+C24	36.8	0
+C25	27.6	6
+C26	36.8	18
+C27	46	15
+C28	55.2	15
+C29	64.4	6
+C30	73.6	6
+C31	82.8	11.25
+C32	92	11.25
+D1	9.2	54
+D2	18.4	18
+D3	27.6	30
+D4	36.8	36
+D5	46	30
+D6	64.4	18
+D7	73.6	18
+D8	82.8	22.5
+D9	92	22.5
+D10	101.2	22.5
+D11	101.2	33.75
+D12	92	33.75
+D13	82.8	33.75
+D14	73.6	30
+D15	64.4	30
+D16	55.2	30
+D17	46	45
+D18	55.2	45
+D19	64.4	42
+D20	73.6	42
+D21	82.8	45
+D22	92	45
+D23	101.2	45
+D24	92	56.25
+D25	82.8	56.25
+D26	73.6	54
+D27	64.4	54
+D28	55.2	60
+D29	64.4	66
+D30	73.6	66
+D31	82.8	67.5
+D32	92	67.5
+E1	18.4	90
+E2	18.4	54
+E3	27.6	54
+E4	36.8	54
+E5	46	60
+E6	46	75
+E7	55.2	75
+E8	64.4	78
+E9	73.6	78
+E10	82.8	78.75
+E11	92	78.75
+E12	92	90
+E13	82.8	90
+E14	73.6	90
+E15	64.4	90
+E16	55.2	90
+E17	46	90
+E18	36.8	90
+E19	36.8	72
+E20	27.6	78
+E21	-27.6	-78
+E22	-36.8	-72
+E23	-46	-75
+E24	-55.2	-75
+E25	-64.4	-78
+E26	-73.6	-78
+E27	-82.8	-78.75
+E28	-92	-78.75
+E29	-92	-67.5
+E30	-82.8	-67.5
+E31	-73.6	-66
+E32	-64.4	-66
+F1	-9.2	-54
+F2	-18.4	-54
+F3	-27.6	-54
+F4	-36.8	-54
+F5	-46	-60
+F6	-55.2	-60
+F7	-64.4	-54
+F8	-73.6	-54
+F9	-82.8	-56.25
+F10	-92	-56.25
+F11	-101.2	-45
+F12	-92	-45
+F13	-82.8	-45
+F14	-73.6	-42
+F15	-64.4	-42
+F16	-55.2	-45
+F17	-46	-45
+F18	-36.8	-36
+F19	-27.6	-30
+F20	-18.4	-18
+F21	-27.6	-6
+F22	-36.8	-18
+F23	-46	-30
+F24	-55.2	-30
+F25	-64.4	-30
+F26	-73.6	-30
+F27	-82.8	-33.75
+F28	-92	-33.75
+F29	-101.2	-33.75
+F30	-101.2	-22.5
+F31	-92	-22.5
+F32	-82.8	-22.5
+G1	-9.2	18
+G2	-18.4	18
+G3	-27.6	18
+G4	-36.8	0
+G5	-46	-15
+G6	-55.2	-15
+G7	-64.4	-18
+G8	-73.6	-18
+G9	-82.8	-11.25
+G10	-92	-11.25
+G11	-92	0
+G12	-82.8	0
+G13	-73.6	-6
+G14	-64.4	-6
+G15	-55.2	0
+G16	-46	0
+G17	-55.2	15
+G18	-64.4	6
+G19	-73.6	6
+G20	-82.8	11.25
+G21	-92	11.25
+G22	-101.2	22.5
+G23	-92	22.5
+G24	-82.8	22.5
+G25	-73.6	18
+G26	-64.4	18
+G27	-64.4	30
+G28	-73.6	30
+G29	-82.8	33.75
+G30	-92	33.75
+G31	-101.2	33.75
+G32	-110.4	30
+H1	-18.4	54
+H2	-27.6	42
+H3	-36.8	36
+H4	-36.8	18
+H5	-46	15
+H6	-46	30
+H7	-55.2	30
+H8	-64.4	42
+H9	-73.6	42
+H10	-82.8	45
+H11	-92	45
+H12	-101.2	45
+H13	-110.4	42
+H14	-110.4	54
+H15	-101.2	56.25
+H16	-92	56.25
+H17	-82.8	56.25
+H18	-73.6	54
+H19	-64.4	54
+H20	-55.2	45
+H21	-46	45
+H22	-36.8	54
+H23	-27.6	66
+H24	-46	60
+H25	-55.2	60
+H26	-64.4	66
+H27	-73.6	66
+H28	-82.8	67.5
+H29	-92	67.5
+H30	-101.2	67.5
+H31	-110.4	66
+H32	-119.6	66
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/mne/channels/data/montages/biosemi32.txt b/mne/channels/data/montages/biosemi32.txt
new file mode 100644
index 0000000..d2e0a14
--- /dev/null
+++ b/mne/channels/data/montages/biosemi32.txt
@@ -0,0 +1,36 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+AF3	 -74	-65
+F7	 -92	-36
+F3	 -60	-51
+FC1	 -32	-45
+FC5	 -72	-21
+T7	 -92	  0
+C3	 -46	  0
+CP1	 -32	 45
+CP5	 -72	 21
+P7	 -92	 36
+P3	 -60	 51
+Pz	  46	-90
+PO3	 -74	 65
+O1	 -92	 72
+Oz	  92	-90
+O2	  92	-72
+PO4	  74	-65
+P4	  60	-51
+P8	  92	-36
+CP6	  72	-21
+CP2	  32	-45
+C4	  46	  0
+T8 	  92	  0
+FC6	  72	 21
+FC2	  32	 45
+F4	  60	 51
+F8	  92	 36
+AF4	  74	 65
+Fp2	  92	 72
+Fz	  46	 90
+Cz	   0	  0
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/mne/channels/data/montages/biosemi64.txt b/mne/channels/data/montages/biosemi64.txt
new file mode 100644
index 0000000..4071cfb
--- /dev/null
+++ b/mne/channels/data/montages/biosemi64.txt
@@ -0,0 +1,68 @@
+Site	 Theta	Phi
+Fp1	-92	-72
+AF7	-92	-54
+AF3	-74	-65
+F1	-50	-68
+F3	-60	-51
+F5	-75	-41
+F7	-92	-36
+FT7	-92	-18
+FC5	-72	-21
+FC3	-50	-28
+FC1	-32	-45
+C1	-23	0
+C3	-46	0
+C5	-69	0
+T7	-92	0
+TP7	-92	18
+CP5	-72	21
+CP3	-50	28
+CP1	-32	45
+P1	-50	68
+P3	-60	51
+P5	-75	41
+P7	-92	36
+P9	-115	36
+PO7	-92	54
+PO3	-74	65
+O1	-92	72
+Iz	115	-90
+Oz	92	-90
+POz	69	-90
+Pz	46	-90
+CPz	23	-90
+Fpz	92	90
+Fp2	92	72
+AF8	92	54
+AF4	74	65
+AFz	69	90
+Fz	46	90
+F2	50	68
+F4	60	51
+F6	75	41
+F8	92	36
+FT8	92	18
+FC6	72	21
+FC4	50	28
+FC2	32	45
+FCz	23	90
+Cz	0	0
+C2	23	0
+C4	46	0
+C6	69	0
+T8	92	0
+TP8	92	-18
+CP6	72	-21
+CP4	50	-28
+CP2	32	-45
+P2	50	-68
+P4	60	-51
+P6	75	-41
+P8	92	-36
+P10	115	-36
+PO8	92	-54
+PO4	74	-65
+O2	92	-72
+Nz	 115	 90
+LPA	-115	  0
+RPA	 115	  0
diff --git a/mne/channels/data/montages/easycap-M1.txt b/mne/channels/data/montages/easycap-M1.txt
new file mode 100644
index 0000000..271dc0f
--- /dev/null
+++ b/mne/channels/data/montages/easycap-M1.txt
@@ -0,0 +1,75 @@
+Site	 Theta	Phi
+Fp1	 -92	-72
+Fp2	  92	 72
+F3	 -60	-51
+F4	  60	 51
+C3	 -46	  0
+C4	  46	  0
+P3	 -60	 51
+P4	  60	-51
+O1	 -92	 72
+O2	  92	-72
+F7	 -92	-36
+F8	  92	 36
+T7	 -92	  0
+T8	  92	  0
+P7	 -92	 36
+P8	  92	-36
+Fz	  46	 90
+Cz	   0	  0
+Pz	  46	-90
+F1	 -50	-68
+F2	  50	 68
+FC1	 -32	-45
+FC2	  32	 45
+C1	 -23	  0
+C2	  23	  0
+CP1	 -32	 45
+CP2	  32	-45
+P1	 -50	 68
+P2	  50	-68
+AF3	 -74	-65
+AF4	  74	 65
+FC3	 -53	-33
+FC4	  53	 33
+CP3	 -52	 33
+CP4	  52	-33
+PO3	 -74	 65
+PO4	  74	-65
+F5	 -75	-41
+F6	  75	 41
+FC5	 -72	-21
+FC6	  72	 21
+C5	 -69	  0
+C6	  69	  0
+CP5	 -72	 21
+CP6	  72	-21
+P5	 -75	 41
+P6	  75	-41
+AF7	 -92	-54
+AF8	  92	 54
+FT7	 -92	-18
+FT8	  92	 18
+TP7	 -92	 18
+TP8	  92	-18
+PO7	 -92	 54
+PO8	  92	-54
+F9	-115	-36
+F10	 115	 36
+FT9	-115	-18
+FT10	 115	 18
+TP9	-115	 18
+TP10	 115	-18
+P9	-115	 36
+P10	 115	-36
+PO9	-115	 54
+PO10	 115	-54
+O9	-115	 72
+O10	 115	-72
+Fpz	  92	 90
+AFz	  69	 90
+FCz	  23	 90
+CPz	  23	-90
+POz	  69	-90
+Oz	  92	-90
+Iz	 115	-90
diff --git a/mne/channels/data/montages/easycap-M10.txt b/mne/channels/data/montages/easycap-M10.txt
new file mode 100644
index 0000000..7019fc6
--- /dev/null
+++ b/mne/channels/data/montages/easycap-M10.txt
@@ -0,0 +1,62 @@
+Site	 Theta	Phi
+1	   0	   0
+2	  23	  90
+3	  23	  30
+4	  23	 -30
+5	  23	 -90
+6	 -23	  30
+7	 -23	 -30
+8	  46	  90
+9	  46	  66
+10	  46	  33
+11	  46	   0
+12	  46	 -33
+13	  46	 -66
+14	  46	 -90
+15	 -46	  66
+16	 -46	  33
+17	 -46	   0
+18	 -46	 -33
+19	 -46	 -66
+20	  69	  90
+21	  69	  66
+22	  69	  42
+23	  69	  18
+24	  69	  -6
+25	  69	 -30
+26	  69	 -54
+27	  69	 -78
+28	 -69	  78
+29	 -69	  54
+30	 -69	  30
+31	 -69	   6
+32	 -69	 -18
+33	 -69	 -42
+34	 -69	 -66
+35	  92	  90
+36	  92	  68
+37	  92	  45
+38	  92	  22
+39	  92	   0
+40	  92	 -22
+41	  92	 -45
+42	  92	 -68
+43	  92	 -90
+44	 -92	  68
+45	 -92	  45
+46	 -92	  22
+47	 -92	   0
+48	 -92	 -22
+49	 -92	 -45
+50	 -92	 -68
+51	 115	  35
+52	 115	  10
+53	 115	 -15
+54	 115	 -40
+55	 115	 -65
+56	 115	 -90
+57	-115	  65
+58	-115	  40
+59	-115	  15
+60	-115	 -10
+61	-115	 -35
diff --git a/mne/channels/data/montages/standard_1005.elc b/mne/channels/data/montages/standard_1005.elc
new file mode 100644
index 0000000..4e69532
--- /dev/null
+++ b/mne/channels/data/montages/standard_1005.elc
@@ -0,0 +1,698 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	346
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-48.9708 64.0872 -47.6830
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+50.4352 63.8698 -48.0050
+-70.1019 41.6523 -49.9520
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+72.1141 42.0667 -50.4520
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-85.6192 -46.5147 -45.7070
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+86.1618 -47.0353 -45.8690
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-43.2897 75.8552 -28.2440
+-38.5517 79.9532 -4.9950
+-27.9857 82.4591 2.7020
+-17.1947 84.8491 10.0270
+-5.9317 86.8780 16.2000
+7.1053 87.0740 16.4690
+18.9233 85.5969 11.4430
+28.6443 82.9759 2.8280
+39.3203 80.6868 -4.7250
+43.8223 76.5418 -28.3070
+-63.2538 53.8573 -30.3160
+-61.3508 58.7992 0.8970
+-50.7998 64.0412 23.0890
+-34.3157 68.3931 41.1880
+-11.4357 70.7561 50.3480
+13.4793 71.2010 51.1750
+36.1833 69.1509 41.2540
+52.3972 65.0708 22.8620
+62.9152 60.0448 0.6300
+64.3342 54.5998 -30.4440
+-79.0669 28.0813 -31.2530
+-74.4999 31.3003 4.8460
+-65.2379 36.4282 36.1440
+-44.4098 40.7622 61.6900
+-15.4238 43.6600 77.6820
+17.5922 44.0540 77.7880
+45.8532 41.6228 60.6470
+67.1281 37.7998 35.2960
+78.0531 32.9817 4.4830
+80.0971 28.5137 -31.3380
+-84.1250 -1.8467 -29.7940
+-82.3550 0.8263 8.5790
+-74.6920 4.3033 45.3070
+-51.0509 7.1772 74.3770
+-18.2190 9.0941 92.5290
+18.7870 9.2479 91.5620
+51.8851 7.7978 73.5070
+77.0020 5.3357 45.3500
+83.8880 1.9457 8.5010
+84.1230 -1.8083 -29.6380
+-86.9731 -32.2157 -27.8480
+-85.5651 -30.6287 11.1530
+-76.4071 -29.7307 49.2170
+-52.9281 -28.9058 80.3040
+-18.3541 -28.3219 98.2200
+20.2199 -28.1481 98.1720
+55.1139 -28.3862 80.4740
+79.0059 -28.9863 49.6280
+85.9999 -29.8203 11.2480
+88.6249 -32.2723 -28.0000
+-78.1602 -60.7567 -23.8240
+-76.6802 -60.8317 12.8800
+-68.1152 -62.9747 47.2520
+-46.9142 -64.6908 75.2960
+-15.8202 -65.5999 91.1640
+19.4198 -65.5950 92.4050
+50.6738 -64.4822 76.1300
+71.0958 -62.6243 47.3280
+78.5198 -60.4323 12.9020
+78.9027 -60.9553 -23.8050
+-64.5973 -87.6558 -19.0140
+-62.9593 -87.5028 12.9520
+-54.0103 -89.8988 37.3320
+-35.8874 -91.6669 55.5040
+-12.0474 -92.6069 65.5080
+13.9226 -92.6940 66.9580
+37.7986 -91.6291 56.7330
+54.6087 -89.6402 37.0350
+63.1117 -87.2282 12.8560
+65.0137 -87.8062 -18.9520
+-42.8624 -108.0730 -13.1510
+-40.1204 -107.1290 12.0610
+-31.9514 -108.2520 23.0470
+-19.8624 -108.9420 29.7600
+-6.9194 -109.2600 32.7100
+6.8036 -109.1630 31.5820
+20.2936 -108.9140 28.9440
+32.1756 -108.2520 22.2550
+41.0976 -107.2450 12.1380
+43.8946 -109.1270 -13.1700
+-14.8504 -117.9870 -6.9200
+15.0946 -118.0180 -6.9330
+-14.8107 87.2351 -4.4770
+15.1623 88.0910 -4.5510
+-54.8298 66.4132 -29.7040
+-51.1757 70.8362 -1.7550
+-39.6407 74.8671 13.6780
+-27.2187 78.7091 28.3750
+-9.1977 80.6051 35.1330
+10.4823 80.8650 35.3590
+28.5803 79.3029 28.4700
+40.9403 75.7399 13.8600
+52.0293 71.8468 -1.9200
+55.7542 67.1698 -29.8240
+-71.5079 41.1193 -30.8540
+-68.5558 45.2843 3.0020
+-58.4878 50.6722 30.1920
+-39.9798 55.2601 52.6000
+-13.3838 57.9021 64.3320
+15.8342 58.4559 64.9920
+41.7942 56.2259 51.4990
+60.0522 52.0858 28.7080
+71.9592 47.1917 2.4750
+72.7981 41.8218 -31.0260
+-82.9559 13.3203 -30.8080
+-80.1139 16.3903 6.8500
+-71.2099 20.8203 41.3240
+-48.5119 24.5292 69.1360
+-17.3439 27.0241 86.9230
+18.4181 27.2709 86.4370
+49.5481 25.2378 68.4300
+73.2191 22.0067 41.2970
+81.5801 17.6837 6.5640
+83.3711 13.5477 -30.7490
+-85.1321 -17.0557 -28.7310
+-82.9461 -14.8827 10.0090
+-75.2941 -12.6397 47.9040
+-51.5811 -10.7548 78.0350
+-18.2790 -9.4319 97.3560
+19.6780 -9.3041 95.7060
+53.8059 -10.1442 77.7300
+78.1249 -11.7353 47.8400
+85.1369 -13.9063 9.8900
+86.0999 -17.0883 -28.7560
+-84.8102 -47.2457 -26.2200
+-82.7042 -46.2977 11.9740
+-73.3012 -46.7917 49.1090
+-51.0492 -47.1758 80.0160
+-17.3542 -47.3419 97.4100
+20.6798 -47.2321 98.0720
+53.9968 -46.8902 80.0770
+76.5498 -46.3733 49.1400
+85.1998 -45.8073 12.1020
+85.4428 -47.2213 -26.1760
+-72.1773 -74.6277 -21.5360
+-70.1133 -74.8677 12.9990
+-61.7283 -77.6238 43.0280
+-41.6733 -79.7528 66.7150
+-13.9613 -81.0029 81.0030
+17.2977 -80.9810 81.6410
+44.7477 -79.6111 67.6550
+63.6267 -77.3022 43.1190
+72.1037 -74.4993 13.0250
+73.2817 -75.0773 -21.5760
+-54.7754 -98.9768 -16.1930
+-51.9284 -98.4438 12.3040
+-43.3424 -100.1629 30.0090
+-28.0074 -101.3610 42.3790
+-9.5034 -102.0600 49.4180
+10.2356 -102.0290 48.9420
+28.6476 -101.3901 42.1380
+44.2206 -100.2191 29.8080
+52.8386 -98.5360 12.2500
+55.8596 -99.8940 -16.2080
+-14.8054 -115.1000 11.8290
+15.1456 -115.1910 11.8330
+-15.1584 -118.2420 -26.0480
+15.1286 -118.1510 -26.0810
+-36.1247 72.3801 -45.8520
+-43.5117 78.5802 -9.2400
+-33.2847 81.2071 -1.1400
+-22.3517 83.5621 6.0710
+-12.2417 86.1941 14.1880
+0.1703 87.3220 17.4420
+13.6223 86.7579 15.3020
+24.1013 84.3769 7.4330
+33.9133 81.8119 -1.0350
+43.9483 79.2958 -9.3000
+37.7123 72.1679 -46.1970
+-59.3398 52.6802 -48.7700
+-63.2618 55.9922 -11.1730
+-55.8198 61.3962 11.8840
+-43.3817 66.3672 32.8110
+-23.5817 69.9171 47.2930
+0.2763 71.2800 52.0920
+25.5583 70.5559 47.8270
+45.1522 67.2748 32.7310
+58.0002 62.5998 11.9000
+64.6732 57.2738 -11.4600
+60.6012 52.2668 -49.0380
+-78.4839 28.7703 -50.5220
+-76.6149 28.6533 -11.5080
+-71.5059 33.9263 20.9930
+-55.9399 38.7162 49.7880
+-30.6548 42.4151 71.0400
+0.3512 44.0740 79.1410
+32.6451 43.1009 70.7950
+57.5042 39.8518 48.8110
+74.2501 35.4997 20.3800
+79.0341 30.3437 -11.9970
+79.9201 28.9417 -50.9140
+-87.3620 -0.5147 -49.8370
+-82.6680 -0.9417 -10.2840
+-80.1330 2.5853 27.3120
+-64.1610 5.8313 60.8850
+-35.7490 8.3091 85.4590
+0.3911 9.5080 95.5600
+36.0700 8.6519 83.8320
+65.1640 6.6198 60.0520
+81.5440 3.6637 27.2010
+83.1680 0.1817 -10.3640
+85.3930 -0.9523 -49.5200
+-86.6321 -31.2377 -47.1780
+-85.9331 -31.0927 -8.4740
+-81.5431 -30.1727 30.2730
+-66.1281 -29.2957 65.8980
+-36.9301 -28.5699 91.7340
+0.3959 -28.1630 101.2690
+38.5399 -28.2251 90.9760
+68.8539 -28.6403 66.4100
+84.5529 -29.3783 30.8780
+85.9999 -30.2803 -8.4350
+86.7619 -31.7313 -47.2530
+-80.7152 -60.6457 -43.5940
+-78.5992 -59.7237 -4.7580
+-73.6642 -61.9227 30.3800
+-59.4112 -63.9248 62.6720
+-32.7283 -65.3199 85.9440
+0.3658 -65.7500 94.0580
+35.8918 -65.1381 85.9800
+62.2558 -63.6152 62.7190
+76.6708 -61.5483 30.5430
+79.3188 -59.3033 -4.8400
+81.5598 -61.2153 -43.8000
+-64.5703 -86.4318 -38.3240
+-64.5833 -86.2218 0.0330
+-58.7123 -88.7048 25.1930
+-46.1603 -90.8878 47.4460
+-24.6483 -92.2919 62.0760
+0.2727 -92.7580 67.3420
+26.4367 -92.2951 63.1990
+47.1437 -90.7122 47.6780
+60.8127 -88.5042 25.6620
+65.1517 -85.9432 -0.0090
+65.0377 -86.7182 -38.4480
+-43.1284 -107.5160 -32.3870
+-42.9764 -106.4930 5.7730
+-36.2344 -107.7160 17.7500
+-25.9844 -108.6160 26.5440
+-13.6644 -109.2660 32.8560
+0.1676 -109.2760 32.7900
+13.6506 -109.1060 30.9360
+26.6636 -108.6680 26.4150
+37.7006 -107.8400 18.0690
+43.6696 -106.5990 5.7260
+43.1766 -107.4440 -32.4630
+-29.3914 -114.5110 -10.0200
+0.0525 -119.3430 -3.9360
+29.5526 -113.6360 -10.0510
+-84.1611 -16.0187 -9.3460
+-72.4343 -73.4527 -2.4870
+85.0799 -15.0203 -9.4900
+73.0557 -73.0683 -2.5400
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+AF9
+AF7
+AF5
+AF3
+AF1
+AFz
+AF2
+AF4
+AF6
+AF8
+AF10
+F9
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F10
+FT9
+FT7
+FC5
+FC3
+FC1
+FCz
+FC2
+FC4
+FC6
+FT8
+FT10
+T9
+T7
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T8
+T10
+TP9
+TP7
+CP5
+CP3
+CP1
+CPz
+CP2
+CP4
+CP6
+TP8
+TP10
+P9
+P7
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+P8
+P10
+PO9
+PO7
+PO5
+PO3
+PO1
+POz
+PO2
+PO4
+PO6
+PO8
+PO10
+O1
+Oz
+O2
+I1
+Iz
+I2
+AFp9h
+AFp7h
+AFp5h
+AFp3h
+AFp1h
+AFp2h
+AFp4h
+AFp6h
+AFp8h
+AFp10h
+AFF9h
+AFF7h
+AFF5h
+AFF3h
+AFF1h
+AFF2h
+AFF4h
+AFF6h
+AFF8h
+AFF10h
+FFT9h
+FFT7h
+FFC5h
+FFC3h
+FFC1h
+FFC2h
+FFC4h
+FFC6h
+FFT8h
+FFT10h
+FTT9h
+FTT7h
+FCC5h
+FCC3h
+FCC1h
+FCC2h
+FCC4h
+FCC6h
+FTT8h
+FTT10h
+TTP9h
+TTP7h
+CCP5h
+CCP3h
+CCP1h
+CCP2h
+CCP4h
+CCP6h
+TTP8h
+TTP10h
+TPP9h
+TPP7h
+CPP5h
+CPP3h
+CPP1h
+CPP2h
+CPP4h
+CPP6h
+TPP8h
+TPP10h
+PPO9h
+PPO7h
+PPO5h
+PPO3h
+PPO1h
+PPO2h
+PPO4h
+PPO6h
+PPO8h
+PPO10h
+POO9h
+POO7h
+POO5h
+POO3h
+POO1h
+POO2h
+POO4h
+POO6h
+POO8h
+POO10h
+OI1h
+OI2h
+Fp1h
+Fp2h
+AF9h
+AF7h
+AF5h
+AF3h
+AF1h
+AF2h
+AF4h
+AF6h
+AF8h
+AF10h
+F9h
+F7h
+F5h
+F3h
+F1h
+F2h
+F4h
+F6h
+F8h
+F10h
+FT9h
+FT7h
+FC5h
+FC3h
+FC1h
+FC2h
+FC4h
+FC6h
+FT8h
+FT10h
+T9h
+T7h
+C5h
+C3h
+C1h
+C2h
+C4h
+C6h
+T8h
+T10h
+TP9h
+TP7h
+CP5h
+CP3h
+CP1h
+CP2h
+CP4h
+CP6h
+TP8h
+TP10h
+P9h
+P7h
+P5h
+P3h
+P1h
+P2h
+P4h
+P6h
+P8h
+P10h
+PO9h
+PO7h
+PO5h
+PO3h
+PO1h
+PO2h
+PO4h
+PO6h
+PO8h
+PO10h
+O1h
+O2h
+I1h
+I2h
+AFp9
+AFp7
+AFp5
+AFp3
+AFp1
+AFpz
+AFp2
+AFp4
+AFp6
+AFp8
+AFp10
+AFF9
+AFF7
+AFF5
+AFF3
+AFF1
+AFFz
+AFF2
+AFF4
+AFF6
+AFF8
+AFF10
+FFT9
+FFT7
+FFC5
+FFC3
+FFC1
+FFCz
+FFC2
+FFC4
+FFC6
+FFT8
+FFT10
+FTT9
+FTT7
+FCC5
+FCC3
+FCC1
+FCCz
+FCC2
+FCC4
+FCC6
+FTT8
+FTT10
+TTP9
+TTP7
+CCP5
+CCP3
+CCP1
+CCPz
+CCP2
+CCP4
+CCP6
+TTP8
+TTP10
+TPP9
+TPP7
+CPP5
+CPP3
+CPP1
+CPPz
+CPP2
+CPP4
+CPP6
+TPP8
+TPP10
+PPO9
+PPO7
+PPO5
+PPO3
+PPO1
+PPOz
+PPO2
+PPO4
+PPO6
+PPO8
+PPO10
+POO9
+POO7
+POO5
+POO3
+POO1
+POOz
+POO2
+POO4
+POO6
+POO8
+POO10
+OI1
+OIz
+OI2
+T3
+T5
+T4
+T6
+M1
+M2
+A1
+A2
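
The ASA .elc format above is plain text: a short header, a "Positions"
block of whitespace-separated x/y/z coordinates (here in millimetres, per
the UnitPosition field), and a "Labels" block listing one channel name per
line in the same order. A minimal parser sketch for illustration (a
hypothetical helper, not the reader MNE itself ships):

    import numpy as np

    def read_elc(fname):
        """Parse an ASA .elc file into (positions, labels)."""
        positions, labels = [], []
        in_pos = in_lab = False
        with open(fname) as f:
            for line in f:
                line = line.strip()
                if line == 'Positions':
                    in_pos, in_lab = True, False
                elif line == 'Labels':
                    in_pos, in_lab = False, True
                elif in_pos and line:
                    positions.append([float(v) for v in line.split()])
                elif in_lab and line:
                    labels.append(line)
        return np.array(positions), labels
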
diff --git a/mne/channels/data/montages/standard_1020.elc b/mne/channels/data/montages/standard_1020.elc
new file mode 100644
index 0000000..2f68b51
--- /dev/null
+++ b/mne/channels/data/montages/standard_1020.elc
@@ -0,0 +1,200 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	97
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-48.9708 64.0872 -47.6830
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+50.4352 63.8698 -48.0050
+-70.1019 41.6523 -49.9520
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+72.1141 42.0667 -50.4520
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-85.6192 -46.5147 -45.7070
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+86.1618 -47.0353 -45.8690
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-84.1611 -16.0187 -9.3460
+-72.4343 -73.4527 -2.4870
+85.0799 -15.0203 -9.4900
+73.0557 -73.0683 -2.5400
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+AF9
+AF7
+AF5
+AF3
+AF1
+AFz
+AF2
+AF4
+AF6
+AF8
+AF10
+F9
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F10
+FT9
+FT7
+FC5
+FC3
+FC1
+FCz
+FC2
+FC4
+FC6
+FT8
+FT10
+T9
+T7
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T8
+T10
+TP9
+TP7
+CP5
+CP3
+CP1
+CPz
+CP2
+CP4
+CP6
+TP8
+TP10
+P9
+P7
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+P8
+P10
+PO9
+PO7
+PO5
+PO3
+PO1
+POz
+PO2
+PO4
+PO6
+PO8
+PO10
+O1
+Oz
+O2
+O9
+Iz
+O10
+T3
+T5
+T4
+T6
+M1
+M2
+A1
+A2
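
Rather than parsing these files by hand, this release exposes the bundled
montages by name. A usage sketch, assuming the 0.10-era
mne.channels.read_montage API:

    from mne.channels import read_montage

    # 'standard_1020' is resolved against mne/channels/data/montages,
    # with the '.elc' extension appended (assumed lookup behaviour)
    montage = read_montage('standard_1020')
    print(montage.kind, len(montage.ch_names))
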
diff --git a/mne/channels/data/montages/standard_alphabetic.elc b/mne/channels/data/montages/standard_alphabetic.elc
new file mode 100644
index 0000000..55367e4
--- /dev/null
+++ b/mne/channels/data/montages/standard_alphabetic.elc
@@ -0,0 +1,142 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	68
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-33.7007 76.8371 21.2270
+0.2313 80.7710 35.4170
+35.7123 77.7259 21.9560
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+-54.8404 -97.5279 2.7920
+-36.5114 -100.8529 37.1670
+0.2156 -102.1780 50.6080
+36.7816 -100.8491 36.3970
+55.6666 -97.6251 2.7300
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+B3
+B1
+Bz
+B2
+B4
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+D7
+D5
+D3
+D1
+Dz
+D2
+D4
+D6
+D8
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+E7
+E5
+E3
+E1
+Ez
+E2
+E4
+E6
+E8
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+H3
+H1
+Hz
+H2
+H4
+O1
+Oz
+O2
+M1
+M2
+A1
+A2
diff --git a/mne/channels/data/montages/standard_postfixed.elc b/mne/channels/data/montages/standard_postfixed.elc
new file mode 100644
index 0000000..3ed4d32
--- /dev/null
+++ b/mne/channels/data/montages/standard_postfixed.elc
@@ -0,0 +1,212 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	103
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+F7a
+F5a
+F3a
+F1a
+Fza
+F2a
+F4a
+F6a
+F8a
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F7p
+F5p
+F3p
+F1p
+Fzp
+F2p
+F4p
+F6p
+F8p
+T1
+T3a
+C5a
+C3a
+C1a
+Cza
+C2a
+C4a
+C6a
+T4a
+T2
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+T3p
+C5p
+C3p
+C1p
+Czp
+C2p
+C4p
+C6p
+T4p
+T5a
+P5a
+P3a
+P1a
+Pza
+P2a
+P4a
+P6a
+T6a
+Cb1a
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+Cb2a
+Cb1
+O1a
+P5p
+P3p
+P1p
+Pzp
+P2p
+P4p
+P6p
+O2a
+Cb2
+O1
+Oz
+O2
+Cb1p
+Iz
+Cb2p
+M1
+M2
+A1
+A2
diff --git a/mne/channels/data/montages/standard_prefixed.elc b/mne/channels/data/montages/standard_prefixed.elc
new file mode 100644
index 0000000..67563c0
--- /dev/null
+++ b/mne/channels/data/montages/standard_prefixed.elc
@@ -0,0 +1,160 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	77
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+55.7433 69.6568 -10.7550
+-33.7007 76.8371 21.2270
+0.2313 80.7710 35.4170
+35.7123 77.7259 21.9560
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-85.8941 -15.8287 -48.2830
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+85.5599 -16.3613 -48.2710
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-36.5114 -100.8529 37.1670
+0.2156 -102.1780 50.6080
+36.7816 -100.8491 36.3970
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+0.0045 -118.5650 -23.0780
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+aF3
+aF1
+aFz
+aF2
+aF4
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+iT1
+T1
+pF5
+pF3
+pF1
+pFz
+pF2
+pF4
+pF6
+T2
+iT2
+iT3
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+iT4
+T3A
+pC5
+pC3
+pC1
+pCz
+pC2
+pC4
+pC6
+T4A
+iT5
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+iT6
+pO5
+pO3
+pO1
+pOz
+pO2
+pO4
+pO6
+O1
+Oz
+O2
+Iz
+M1
+M2
+A1
+A2
diff --git a/mne/channels/data/montages/standard_primed.elc b/mne/channels/data/montages/standard_primed.elc
new file mode 100644
index 0000000..00ec918
--- /dev/null
+++ b/mne/channels/data/montages/standard_primed.elc
@@ -0,0 +1,212 @@
+# ASA electrode file
+ReferenceLabel	avg
+UnitPosition	mm
+NumberPositions=	103
+Positions
+-86.0761 -19.9897 -47.9860
+85.7939 -20.0093 -48.0310
+0.0083 86.8110 -39.9830
+-29.4367 83.9171 -6.9900
+0.1123 88.2470 -1.7130
+29.8723 84.8959 -7.0800
+-54.8397 68.5722 -10.5900
+-45.4307 72.8622 5.9780
+-33.7007 76.8371 21.2270
+-18.4717 79.9041 32.7520
+0.2313 80.7710 35.4170
+19.8203 80.3019 32.7640
+35.7123 77.7259 21.9560
+46.5843 73.8078 6.0340
+55.7433 69.6568 -10.7550
+-70.2629 42.4743 -11.4200
+-64.4658 48.0353 16.9210
+-50.2438 53.1112 42.1920
+-27.4958 56.9311 60.3420
+0.3122 58.5120 66.4620
+29.5142 57.6019 59.5400
+51.8362 54.3048 40.8140
+67.9142 49.8297 16.3670
+73.0431 44.4217 -12.0000
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+-84.0759 14.5673 -50.4290
+-80.7750 14.1203 -11.1350
+-77.2149 18.6433 24.4600
+-60.1819 22.7162 55.5440
+-34.0619 26.0111 79.9870
+0.3761 27.3900 88.6680
+34.7841 26.4379 78.8080
+62.2931 23.7228 55.6300
+79.5341 19.9357 24.4380
+81.8151 15.4167 -11.3300
+84.1131 14.3647 -50.5380
+-84.1611 -16.0187 -9.3460
+-80.2801 -13.7597 29.1600
+-65.3581 -11.6317 64.3580
+-36.1580 -9.9839 89.7520
+0.4009 -9.1670 100.2440
+37.6720 -9.6241 88.4120
+67.1179 -10.9003 63.5800
+83.4559 -12.7763 29.2080
+85.0799 -15.0203 -9.4900
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-84.8302 -46.0217 -7.0560
+-79.5922 -46.5507 30.9490
+-63.5562 -47.0088 65.6240
+-35.5131 -47.2919 91.3150
+0.3858 -47.3180 99.4320
+38.3838 -47.0731 90.6950
+66.6118 -46.6372 65.5800
+83.3218 -46.1013 31.2060
+85.5488 -45.5453 -7.1300
+-73.0093 -73.7657 -40.9980
+-72.4343 -73.4527 -2.4870
+-67.2723 -76.2907 28.3820
+-53.0073 -78.7878 55.9400
+-28.6203 -80.5249 75.4360
+0.3247 -81.1150 82.6150
+31.9197 -80.4871 76.7160
+55.6667 -78.5602 56.5610
+67.8877 -75.9043 28.0910
+73.0557 -73.0683 -2.5400
+73.8947 -74.3903 -41.2200
+-54.9104 -98.0448 -35.4650
+-54.8404 -97.5279 2.7920
+-48.4244 -99.3408 21.5990
+-36.5114 -100.8529 37.1670
+-18.9724 -101.7680 46.5360
+0.2156 -102.1780 50.6080
+19.8776 -101.7930 46.3930
+36.7816 -100.8491 36.3970
+49.8196 -99.4461 21.7270
+55.6666 -97.6251 2.7300
+54.9876 -98.0911 -35.5410
+-29.4134 -112.4490 8.8390
+0.1076 -114.8920 14.6570
+29.8426 -112.1560 8.8000
+-29.8184 -114.5700 -29.2160
+0.0045 -118.5650 -23.0780
+29.7416 -114.2600 -29.2560
+-86.0761 -44.9897 -67.9860
+ 85.7939 -45.0093 -68.0310
+-86.0761 -24.9897 -67.9860
+ 85.7939 -25.0093 -68.0310
+Labels
+LPA
+RPA
+Nz
+Fp1
+Fpz
+Fp2
+F7'
+F5'
+F3'
+F1'
+Fz'
+F2'
+F4'
+F6'
+F8'
+F7
+F5
+F3
+F1
+Fz
+F2
+F4
+F6
+F8
+F7''
+F5''
+F3''
+F1''
+Fz''
+F2''
+F4''
+F6''
+F8''
+T1
+T3'
+C5'
+C3'
+C1'
+Cz'
+C2'
+C4'
+C6'
+T4'
+T2
+T3
+C5
+C3
+C1
+Cz
+C2
+C4
+C6
+T4
+T3''
+C5''
+C3''
+C1''
+Cz''
+C2''
+C4''
+C6''
+T4''
+T5'
+P5'
+P3'
+P1'
+Pz'
+P2'
+P4'
+P6'
+T6'
+Cb1'
+T5
+P5
+P3
+P1
+Pz
+P2
+P4
+P6
+T6
+Cb2'
+Cb1
+O1'
+P5''
+P3''
+P1''
+Pz''
+P2''
+P4''
+P6''
+O2'
+Cb2
+O1
+Oz
+O2
+Cb1''
+Iz
+Cb2''
+M1
+M2
+A1
+A2
diff --git a/mne/channels/data/neighbors/KIT-157_neighb.mat b/mne/channels/data/neighbors/KIT-157_neighb.mat
new file mode 100644
index 0000000..1cae3fc
Binary files /dev/null and b/mne/channels/data/neighbors/KIT-157_neighb.mat differ
diff --git a/mne/channels/data/neighbors/KIT-208_neighb.mat b/mne/channels/data/neighbors/KIT-208_neighb.mat
new file mode 100644
index 0000000..81de840
Binary files /dev/null and b/mne/channels/data/neighbors/KIT-208_neighb.mat differ
diff --git a/mne/channels/data/neighbors/__init__.py b/mne/channels/data/neighbors/__init__.py
new file mode 100644
index 0000000..8fc6ea7
--- /dev/null
+++ b/mne/channels/data/neighbors/__init__.py
@@ -0,0 +1,6 @@
+# Neighbor definitions for clustering permutation analysis.
+# This is a selection of files from http://fieldtrip.fcdonders.nl/template
+# Additional definitions can be obtained through the FieldTrip software.
+# For additional information on how these definitions were computed, please
+# consult the related FieldTrip documentation:
+# http://fieldtrip.fcdonders.nl/template/neighbours.
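
These neighbour templates are MATLAB .mat structs in FieldTrip's format. In
this release they are read through mne.channels.read_ch_connectivity, which
is assumed here to accept a bundled template by its short name and to return
a sparse adjacency matrix together with the channel names it indexes:

    from mne.channels import read_ch_connectivity

    # Load the neighbour definition for Neuromag 306 magnetometers
    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
    print(connectivity.shape, len(ch_names))

The resulting matrix is the kind of structure the cluster permutation tests
in mne.stats accept as their connectivity argument.
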
diff --git a/mne/channels/data/neighbors/biosemi16_neighb.mat b/mne/channels/data/neighbors/biosemi16_neighb.mat
new file mode 100755
index 0000000..56b7fb6
Binary files /dev/null and b/mne/channels/data/neighbors/biosemi16_neighb.mat differ
diff --git a/mne/channels/data/neighbors/biosemi32_neighb.mat b/mne/channels/data/neighbors/biosemi32_neighb.mat
new file mode 100755
index 0000000..1c29040
Binary files /dev/null and b/mne/channels/data/neighbors/biosemi32_neighb.mat differ
diff --git a/mne/channels/data/neighbors/biosemi64_neighb.mat b/mne/channels/data/neighbors/biosemi64_neighb.mat
new file mode 100755
index 0000000..4afbf6f
Binary files /dev/null and b/mne/channels/data/neighbors/biosemi64_neighb.mat differ
diff --git a/mne/channels/data/neighbors/bti148_neighb.mat b/mne/channels/data/neighbors/bti148_neighb.mat
new file mode 100755
index 0000000..527e435
Binary files /dev/null and b/mne/channels/data/neighbors/bti148_neighb.mat differ
diff --git a/mne/channels/data/neighbors/bti248_neighb.mat b/mne/channels/data/neighbors/bti248_neighb.mat
new file mode 100755
index 0000000..9bde76b
Binary files /dev/null and b/mne/channels/data/neighbors/bti248_neighb.mat differ
diff --git a/mne/channels/data/neighbors/bti248grad_neighb.mat b/mne/channels/data/neighbors/bti248grad_neighb.mat
new file mode 100755
index 0000000..4e5d620
Binary files /dev/null and b/mne/channels/data/neighbors/bti248grad_neighb.mat differ
diff --git a/mne/channels/data/neighbors/ctf151_neighb.mat b/mne/channels/data/neighbors/ctf151_neighb.mat
new file mode 100755
index 0000000..611a0bc
Binary files /dev/null and b/mne/channels/data/neighbors/ctf151_neighb.mat differ
diff --git a/mne/channels/data/neighbors/ctf275_neighb.mat b/mne/channels/data/neighbors/ctf275_neighb.mat
new file mode 100755
index 0000000..91cf84e
Binary files /dev/null and b/mne/channels/data/neighbors/ctf275_neighb.mat differ
diff --git a/mne/channels/data/neighbors/ctf64_neighb.mat b/mne/channels/data/neighbors/ctf64_neighb.mat
new file mode 100755
index 0000000..fd001e6
Binary files /dev/null and b/mne/channels/data/neighbors/ctf64_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat
new file mode 100755
index 0000000..020392d
Binary files /dev/null and b/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat
new file mode 100755
index 0000000..62c88f0
Binary files /dev/null and b/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat
new file mode 100755
index 0000000..e59536c
Binary files /dev/null and b/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycapM11_neighb.mat b/mne/channels/data/neighbors/easycapM11_neighb.mat
new file mode 100755
index 0000000..28131e7
Binary files /dev/null and b/mne/channels/data/neighbors/easycapM11_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycapM14_neighb.mat b/mne/channels/data/neighbors/easycapM14_neighb.mat
new file mode 100755
index 0000000..be2ad3d
Binary files /dev/null and b/mne/channels/data/neighbors/easycapM14_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycapM15_neighb.mat b/mne/channels/data/neighbors/easycapM15_neighb.mat
new file mode 100755
index 0000000..7dfa554
Binary files /dev/null and b/mne/channels/data/neighbors/easycapM15_neighb.mat differ
diff --git a/mne/channels/data/neighbors/easycapM1_neighb.mat b/mne/channels/data/neighbors/easycapM1_neighb.mat
new file mode 100755
index 0000000..f60d60d
Binary files /dev/null and b/mne/channels/data/neighbors/easycapM1_neighb.mat differ
diff --git a/mne/channels/data/neighbors/neuromag122_neighb.mat b/mne/channels/data/neighbors/neuromag122_neighb.mat
new file mode 100755
index 0000000..e8bbb75
Binary files /dev/null and b/mne/channels/data/neighbors/neuromag122_neighb.mat differ
diff --git a/mne/channels/data/neighbors/neuromag306mag_neighb.mat b/mne/channels/data/neighbors/neuromag306mag_neighb.mat
new file mode 100755
index 0000000..d7ffc98
Binary files /dev/null and b/mne/channels/data/neighbors/neuromag306mag_neighb.mat differ
diff --git a/mne/channels/data/neighbors/neuromag306planar_neighb.mat b/mne/channels/data/neighbors/neuromag306planar_neighb.mat
new file mode 100755
index 0000000..aa0529e
Binary files /dev/null and b/mne/channels/data/neighbors/neuromag306planar_neighb.mat differ
diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py
new file mode 100644
index 0000000..0b355a4
--- /dev/null
+++ b/mne/channels/interpolation.py
@@ -0,0 +1,207 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from numpy.polynomial.legendre import legval
+from scipy import linalg
+
+from ..utils import logger
+from ..io.pick import pick_types, pick_channels
+from ..surface import _normalize_vectors
+from ..bem import _fit_sphere
+from ..forward import _map_meg_channels
+
+
+def _calc_g(cosang, stiffness=4, num_lterms=50):
+    """Calculate spherical spline g function between points on a sphere.
+
+    Parameters
+    ----------
+    cosang : array-like of float, shape(n_channels, n_channels)
+        cosine of angles between pairs of points on a spherical surface. This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline.
+    num_lterms : int
+        number of Legendre terms to evaluate.
+
+    Returns
+    -------
+    G : np.ndarray of float, shape(n_channels, n_channels)
+        The G matrix.
+    """
+    factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness *
+                              4 * np.pi) for n in range(1, num_lterms + 1)]
+    return legval(cosang, [0] + factors)
+
+
+def _calc_h(cosang, stiffness=4, num_lterms=50):
+    """Calculate spherical spline h function between points on a sphere.
+
+    Parameters
+    ----------
+    cosang : array-like of float, shape(n_channels, n_channels)
+        cosine of angles between pairs of points on a spherical surface. This
+        is equivalent to the dot product of unit vectors.
+    stiffness : float
+        stiffness of the spline. Also referred to as `m`.
+    num_lterms : int
+        number of Legendre terms to evaluate.
+
+    Returns
+    -------
+    H : np.ndarray of float, shape(n_channels, n_channels)
+        The H matrix.
+    """
+    factors = [(2 * n + 1) /
+               (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi)
+               for n in range(1, num_lterms + 1)]
+    return legval(cosang, [0] + factors)
+
+
+def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
+    """Compute interpolation matrix based on spherical splines
+
+    Implementation based on [1]
+
+    Parameters
+    ----------
+    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
+        The positions to interpolate from.
+    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
+        The positions to interpolate to.
+    alpha : float
+        Regularization parameter. Defaults to 1e-5.
+
+    Returns
+    -------
+    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
+        The interpolation matrix that maps good signals to the location
+        of bad signals.
+
+    References
+    ----------
+    [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).
+        Spherical splines for scalp potential and current density mapping.
+        Electroencephalography and Clinical Neurophysiology, Feb; 72(2):184-7.
+    """
+
+    pos_from = pos_from.copy()
+    pos_to = pos_to.copy()
+
+    # normalize sensor positions to sphere
+    _normalize_vectors(pos_from)
+    _normalize_vectors(pos_to)
+
+    # cosine angles between source positions
+    cosang_from = pos_from.dot(pos_from.T)
+    cosang_to_from = pos_to.dot(pos_from.T)
+    G_from = _calc_g(cosang_from)
+    G_to_from, H_to_from = (f(cosang_to_from) for f in (_calc_g, _calc_h))
+
+    if alpha is not None:
+        G_from.flat[::len(G_from) + 1] += alpha
+
+    C_inv = linalg.pinv(G_from)
+    interpolation = G_to_from.dot(C_inv)
+    return interpolation
+
+
+def _do_interp_dots(inst, interpolation, goods_idx, bads_idx):
+    """Dot product of channel mapping matrix to channel data
+    """
+    from ..io.base import _BaseRaw
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+
+    if isinstance(inst, _BaseRaw):
+        inst._data[bads_idx] = interpolation.dot(inst._data[goods_idx])
+    elif isinstance(inst, _BaseEpochs):
+        inst._data[:, bads_idx, :] = np.einsum('ij,xjy->xiy', interpolation,
+                                               inst._data[:, goods_idx, :])
+    elif isinstance(inst, Evoked):
+        inst.data[bads_idx] = interpolation.dot(inst.data[goods_idx])
+    else:
+        raise ValueError('Inputs of type {0} are not supported'
+                         .format(type(inst)))
+
+
+def _interpolate_bads_eeg(inst):
+    """Interpolate bad EEG channels
+
+    Operates in place.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    """
+    bads_idx = np.zeros(len(inst.ch_names), dtype=np.bool)
+    goods_idx = np.zeros(len(inst.ch_names), dtype=np.bool)
+
+    picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
+    inst.info._check_consistency()
+    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]
+
+    if len(picks) == 0 or not bads_idx.any():
+        return
+
+    goods_idx[picks] = True
+    goods_idx[bads_idx] = False
+
+    pos = inst._get_channel_positions(picks)
+
+    # Make sure only EEG are used
+    bads_idx_pos = bads_idx[picks]
+    goods_idx_pos = goods_idx[picks]
+
+    pos_good = pos[goods_idx_pos]
+    pos_bad = pos[bads_idx_pos]
+
+    # test spherical fit
+    radius, center = _fit_sphere(pos_good)
+    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
+    distance = np.mean(distance / radius)
+    if np.abs(1. - distance) > 0.1:
+        logger.warning('Your spherical fit is poor, interpolation results are '
+                       'likely to be inaccurate.')
+
+    logger.info('Computing interpolation matrix from {0} sensor '
+                'positions'.format(len(pos_good)))
+
+    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
+
+    logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
+    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
+
+
+def _interpolate_bads_meg(inst, mode='accurate', verbose=None):
+    """Interpolate bad channels from data in good channels.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used for interpolation. `'fast'` should
+        be sufficient for most applications.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    """
+    picks_meg = pick_types(inst.info, meg=True, eeg=False, exclude=[])
+    ch_names = [inst.info['ch_names'][p] for p in picks_meg]
+    picks_good = pick_types(inst.info, meg=True, eeg=False, exclude='bads')
+
+    # select the bad MEG channels to be interpolated
+    if len(inst.info['bads']) == 0:
+        picks_bad = []
+    else:
+        picks_bad = pick_channels(ch_names, inst.info['bads'],
+                                  exclude=[])
+
+    # return without doing anything if there are no MEG channels or no bads
+    if len(picks_meg) == 0 or len(picks_bad) == 0:
+        return
+
+    mapping = _map_meg_channels(inst, picks_good, picks_bad, mode=mode)
+
+    _do_interp_dots(inst, mapping, picks_good, picks_bad)
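
Both private helpers are reached through the public interpolate_bads method
on Raw, Epochs and Evoked, which dispatches on channel type: EEG bads go
through the spherical-spline path above, MEG bads through the field-mapping
path. A usage sketch, assuming the 0.10-era API (raw_fname is a hypothetical
path to a preloadable FIF file):

    import mne

    raw = mne.io.Raw(raw_fname, preload=True)  # data must be preloaded
    raw.info['bads'] = ['EEG 053']             # hypothetical bad channel

    # EEG bads -> _interpolate_bads_eeg, MEG bads -> _interpolate_bads_meg;
    # reset_bads=True clears info['bads'] once interpolation is done
    raw.interpolate_bads(reset_bads=True)
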
diff --git a/mne/channels/layout.py b/mne/channels/layout.py
new file mode 100644
index 0000000..fb21ac8
--- /dev/null
+++ b/mne/channels/layout.py
@@ -0,0 +1,825 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: Simplified BSD
+
+import logging
+from collections import defaultdict
+from itertools import combinations
+import os.path as op
+
+import numpy as np
+
+from ..transforms import _polar_to_cartesian, _cartesian_to_sphere
+from ..io.pick import pick_types
+from ..io.constants import FIFF
+from ..utils import _clean_names
+from ..externals.six.moves import map
+
+
+class Layout(object):
+    """Sensor layouts
+
+    Layouts are typically loaded from a file using read_layout. Only use this
+    class directly if you're constructing a new layout.
+
+    Parameters
+    ----------
+    box : tuple of length 4
+        The box dimension (x_min, x_max, y_min, y_max).
+    pos : array, shape=(n_channels, 4)
+        The positions of the channels in 2d (x, y, width, height).
+    names : list
+        The channel names.
+    ids : list
+        The channel ids.
+    kind : str
+        The type of Layout (e.g. 'Vectorview-all').
+    """
+    def __init__(self, box, pos, names, ids, kind):
+        self.box = box
+        self.pos = pos
+        self.names = names
+        self.ids = ids
+        self.kind = kind
+
+    def save(self, fname):
+        """Save Layout to disk
+
+        Parameters
+        ----------
+        fname : str
+            The file name (e.g. 'my_layout.lout').
+
+        See Also
+        --------
+        read_layout
+        """
+        x = self.pos[:, 0]
+        y = self.pos[:, 1]
+        width = self.pos[:, 2]
+        height = self.pos[:, 3]
+        if fname.endswith('.lout'):
+            out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
+        elif fname.endswith('.lay'):
+            out_str = ''
+        else:
+            raise ValueError('Unknown layout type. Should be of type '
+                             '.lout or .lay.')
+
+        for ii in range(x.shape[0]):
+            out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
+                        x[ii], y[ii], width[ii], height[ii], self.names[ii]))
+
+        with open(fname, 'w') as f:
+            f.write(out_str)
+
+    def __repr__(self):
+        return '<Layout | %s - Channels: %s ...>' % (self.kind,
+                                                     ', '.join(self.names[:3]))
+
+
+def _read_lout(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box_line = f.readline()  # first line contains box dimension
+        box = tuple(map(float, box_line.split()))
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            if len(splits) == 7:
+                cid, x, y, dx, dy, chkind, nb = splits
+                name = chkind + ' ' + nb
+            else:
+                cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def _read_lay(fname):
+    """Aux function"""
+    with open(fname) as f:
+        box = None
+        names, pos, ids = [], [], []
+        for line in f:
+            splits = line.split()
+            if len(splits) == 7:
+                cid, x, y, dx, dy, chkind, nb = splits
+                name = chkind + ' ' + nb
+            else:
+                cid, x, y, dx, dy, name = splits
+            pos.append(np.array([x, y, dx, dy], dtype=np.float))
+            names.append(name)
+            ids.append(int(cid))
+
+    pos = np.array(pos)
+
+    return box, pos, names, ids
+
+
+def read_layout(kind, path=None, scale=True):
+    """Read layout from a file
+
+    Parameters
+    ----------
+    kind : str
+        The name of the .lout file (e.g. kind='Vectorview-all' for
+        'Vectorview-all.lout').
+
+    path : str | None
+        The path of the folder containing the Layout file. Defaults to the
+        mne/channels/data/layouts folder inside your mne-python installation.
+
+    scale : bool
+        Apply useful scaling for out-of-the-box plotting using layout.pos.
+        Defaults to True.
+
+    Returns
+    -------
+    layout : instance of Layout
+        The layout.
+
+    See Also
+    --------
+    Layout.save
+    """
+    if path is None:
+        path = op.join(op.dirname(__file__), 'data', 'layouts')
+
+    if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
+        kind += '.lout'
+    elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
+        kind += '.lay'
+
+    if kind.endswith('.lout'):
+        fname = op.join(path, kind)
+        kind = kind[:-5]
+        box, pos, names, ids = _read_lout(fname)
+    elif kind.endswith('.lay'):
+        fname = op.join(path, kind)
+        kind = kind[:-4]
+        box, pos, names, ids = _read_lay(fname)
+    else:
+        raise ValueError('Unknown layout type. Should be of type '
+                         '.lout or .lay.')
+
+    if scale:
+        pos[:, 0] -= np.min(pos[:, 0])
+        pos[:, 1] -= np.min(pos[:, 1])
+        scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
+        pos /= scaling
+        pos[:, :2] += 0.03
+        pos[:, :2] *= 0.97 / 1.03
+        pos[:, 2:] *= 0.94
+
+    return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
+
+
+def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
+    """Create .lout file from EEG electrode digitization
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info (e.g., raw.info).
+    radius : float
+        Viewport radius as a fraction of main figure height. Defaults to 0.5.
+    width : float | None
+        Width of sensor axes as a fraction of main figure height. By default,
+        this will be the maximum width possible without axes overlapping.
+    height : float | None
+        Height of sensor axes as a fraction of main figure height. By default,
+        this will be the maximum height possible without axes overlapping.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any.
+        If 'bads', exclude channels in info['bads'] (default).
+
+    Returns
+    -------
+    layout : Layout
+        The generated Layout.
+
+    See Also
+    --------
+    make_grid_layout, generate_2d_layout
+    """
+    if not (0 <= radius <= 0.5):
+        raise ValueError('The radius parameter should be between 0 and 0.5.')
+    if width is not None and not (0 <= width <= 1.0):
+        raise ValueError('The width parameter should be between 0 and 1.')
+    if height is not None and not (0 <= height <= 1.0):
+        raise ValueError('The height parameter should be between 0 and 1.')
+
+    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                       exclude=exclude)
+    loc2d = _auto_topomap_coords(info, picks)
+    names = [info['chs'][i]['ch_name'] for i in picks]
+
+    # Scale [x, y] to [-0.5, 0.5]
+    loc2d_min = np.min(loc2d, axis=0)
+    loc2d_max = np.max(loc2d, axis=0)
+    loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
+
+    # If no width or height specified, calculate the maximum value possible
+    # without axes overlapping.
+    if width is None or height is None:
+        width, height = _box_size(loc2d, width, height, padding=0.1)
+
+    # Scale to viewport radius
+    loc2d *= 2 * radius
+
+    # Some subplot centers will be at the figure edge. Shrink everything so it
+    # fits in the figure.
+    scaling = min(1 / (1. + width), 1 / (1. + height))
+    loc2d *= scaling
+    width *= scaling
+    height *= scaling
+
+    # Shift to center
+    loc2d += 0.5
+
+    n_channels = loc2d.shape[0]
+    pos = np.c_[loc2d[:, 0] - 0.5 * width,
+                loc2d[:, 1] - 0.5 * height,
+                width * np.ones(n_channels),
+                height * np.ones(n_channels)]
+
+    box = (0, 1, 0, 1)
+    ids = 1 + np.arange(n_channels)
+    layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
+    return layout
+
+
+def make_grid_layout(info, picks=None, n_col=None):
+    """ Generate .lout file for custom data, i.e., ICA sources
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info | None
+        Measurement info (e.g., raw.info). If None, default names will be
+        employed.
+    picks : array-like of int | None
+        The indices of the channels to be included. If None, all misc channels
+        will be included.
+    n_col : int | None
+        Number of columns to generate. If None, a square grid will be produced.
+
+    Returns
+    -------
+    layout : Layout
+        The generated layout.
+
+    See Also
+    --------
+    make_eeg_layout, generate_2d_layout
+    """
+    if picks is None:
+        picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
+
+    names = [info['chs'][k]['ch_name'] for k in picks]
+
+    if not names:
+        raise ValueError('No misc data channels found.')
+
+    ids = list(range(len(picks)))
+    size = len(picks)
+
+    if n_col is None:
+        # prepare square-like layout
+        n_row = n_col = np.sqrt(size)  # try square
+        if n_col % 1:
+            # try n * (n-1) rectangle
+            n_col, n_row = int(n_col + 1), int(n_row)
+
+        if n_col * n_row < size:  # jump to the next full square
+            n_row += 1
+    else:
+        n_row = np.ceil(size / float(n_col))
+
+    # setup position grid
+    x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
+                       np.linspace(-0.5, 0.5, n_row))
+    x, y = x.ravel()[:size], y.ravel()[:size]
+    width, height = _box_size(np.c_[x, y], padding=0.1)
+
+    # Some axes will be at the figure edge. Shrink everything so it fits in the
+    # figure. Add 0.01 border around everything
+    border_x, border_y = (0.01, 0.01)
+    x_scaling = 1 / (1. + width + border_x)
+    y_scaling = 1 / (1. + height + border_y)
+    x = x * x_scaling
+    y = y * y_scaling
+    width *= x_scaling
+    height *= y_scaling
+
+    # Shift to center
+    x += 0.5
+    y += 0.5
+
+    # calculate pos
+    pos = np.c_[x - 0.5 * width, y - 0.5 * height,
+                width * np.ones(size), height * np.ones(size)]
+    box = (0, 1, 0, 1)
+
+    layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
+    return layout
+
+
+def find_layout(info, ch_type=None, exclude='bads'):
+    """Choose a layout based on the channels in the info 'chs' field
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
+        The channel type for selecting single channel layouts.
+        Defaults to None. Note, this argument will only be considered for
+        VectorView type layout. Use `meg` to force using the full layout
+        in situations where the info contains only one sensor type.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads'].
+
+    Returns
+    -------
+    layout : Layout instance | None
+        None if layout not found.
+    """
+    our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`'])
+    if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
+        raise ValueError('Invalid channel type (%s) requested '
+                         '`ch_type` must be %s' % (ch_type, our_types))
+
+    chs = info['chs']
+    coil_types = set([ch['coil_type'] for ch in chs])
+    channel_types = set([ch['kind'] for ch in chs])
+
+    has_vv_mag = any(k in coil_types for k in
+                     [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
+                      FIFF.FIFFV_COIL_VV_MAG_T3])
+    has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
+                                                FIFF.FIFFV_COIL_VV_PLANAR_T2,
+                                                FIFF.FIFFV_COIL_VV_PLANAR_T3])
+    has_vv_meg = has_vv_mag and has_vv_grad
+    has_vv_only_mag = has_vv_mag and not has_vv_grad
+    has_vv_only_grad = has_vv_grad and not has_vv_mag
+    is_old_vv = ' ' in chs[0]['ch_name']
+
+    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
+    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
+                       FIFF.FIFFV_COIL_CTF_REF_GRAD,
+                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
+    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
+                    (FIFF.FIFFV_MEG_CH in channel_types and
+                     any(k in ctf_other_types for k in coil_types)))
+    # hack due to MNE-C bug in IO of CTF
+    n_kit_grads = sum(ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD
+                      for ch in chs)
+
+    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
+                       n_kit_grads])
+    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
+                     FIFF.FIFFV_EEG_CH in channel_types)
+    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
+    has_eeg_coils_only = has_eeg_coils and not has_any_meg
+
+    if ch_type == "meg" and not has_any_meg:
+        raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
+
+    if ch_type == "eeg" and not has_eeg_coils:
+        raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
+
+    if ((has_vv_meg and ch_type is None) or
+            (any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
+        layout_name = 'Vectorview-all'
+    elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
+        layout_name = 'Vectorview-mag'
+    elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
+        layout_name = 'Vectorview-grad'
+    elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
+          (has_eeg_coils_and_meg and ch_type == 'eeg')):
+        if not isinstance(info, dict):
+            raise RuntimeError('Cannot make EEG layout, no measurement info '
+                               'was passed to `find_layout`')
+        return make_eeg_layout(info, exclude=exclude)
+    elif has_4D_mag:
+        layout_name = 'magnesWH3600'
+    elif has_CTF_grad:
+        layout_name = 'CTF-275'
+    elif n_kit_grads == 157:
+        layout_name = 'KIT-157'
+    elif n_kit_grads == 208:
+        layout_name = 'KIT-AD'
+    else:
+        return None
+
+    layout = read_layout(layout_name)
+    if not is_old_vv:
+        layout.names = _clean_names(layout.names, remove_whitespace=True)
+    if has_CTF_grad:
+        layout.names = _clean_names(layout.names, before_dash=True)
+
+    return layout
+
+
+def _box_size(points, width=None, height=None, padding=0.0):
+    """ Given a series of points, calculate an appropriate box size.
+
+    Parameters
+    ----------
+    points : array, shape (n_points, 2)
+        The centers of the axes as a list of (x, y) coordinate pairs. Normally
+        these are points in the range [0, 1] centered at 0.5.
+    width : float | None
+        An optional box width to enforce. When set, only the box height will be
+        calculated by the function.
+    height : float | None
+        An optional box height to enforce. When set, only the box width will be
+        calculated by the function.
+    padding : float
+        Portion of the box to reserve for padding. The value can range between
+        0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
+
+    Returns
+    -------
+    width : float
+        Width of the box
+    height : float
+        Height of the box
+    """
+    from scipy.spatial.distance import pdist
+
+    def xdiff(a, b):
+        return np.abs(a[0] - b[0])
+
+    def ydiff(a, b):
+        return np.abs(a[1] - b[1])
+
+    points = np.asarray(points)
+    all_combinations = list(combinations(points, 2))
+
+    if width is None and height is None:
+        if len(points) <= 1:
+            # Trivial case first
+            width = 1.0
+            height = 1.0
+        else:
+            # Find the closest two points A and B.
+            a, b = all_combinations[np.argmin(pdist(points))]
+
+            # The closest points define either the max width or max height.
+            w, h = xdiff(a, b), ydiff(a, b)
+            if w > h:
+                width = w
+            else:
+                height = h
+
+    # At this point, either width or height is known, or both are known.
+    if height is None:
+        # Find all axes that could potentially overlap horizontally.
+        hdist = pdist(points, xdiff)
+        candidates = [all_combinations[i] for i, d in enumerate(hdist)
+                      if d < width]
+
+        if len(candidates) == 0:
+            # No axes overlap, take all the height you want.
+            height = 1.0
+        else:
+            # Find an appropriate height so that none of the found axes will
+            # overlap.
+            height = np.min([ydiff(*c) for c in candidates])
+
+    elif width is None:
+        # Find all axes that could potentially overlap vertically.
+        vdist = pdist(points, ydiff)
+        candidates = [all_combinations[i] for i, d in enumerate(vdist)
+                      if d < height]
+
+        if len(candidates) == 0:
+            # No axes overlap, take all the width you want.
+            width = 1.0
+        else:
+            # Find an appropriate width so that none of the found axes will
+            # overlap.
+            width = np.min([xdiff(*c) for c in candidates])
+
+    # Add a bit of padding between boxes
+    width *= 1 - padding
+    height *= 1 - padding
+
+    return width, height
+
+
+def _find_topomap_coords(info, picks, layout=None):
+    """Try to guess the E/MEG layout and return appropriate topomap coordinates
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    picks : list of int
+        Channel indices to generate topomap coords for.
+    layout : None | instance of Layout
+        Enforce using a specific layout. With None, the coordinates are
+        generated automatically from the channel positions in info.
+
+    Returns
+    -------
+    coords : array, shape = (n_chs, 2)
+        2 dimensional coordinates for each sensor for a topomap plot.
+    """
+    if len(picks) == 0:
+        raise ValueError("Need more than 0 channels.")
+
+    if layout is not None:
+        chs = [info['chs'][i] for i in picks]
+        pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
+        pos = np.asarray(pos)
+    else:
+        pos = _auto_topomap_coords(info, picks)
+
+    return pos
+
+
+def _auto_topomap_coords(info, picks):
+    """Make a 2 dimensional sensor map from sensor positions in an info dict.
+    The default is to use the electrode locations. The fallback option is to
+    attempt using digitization points of kind FIFFV_POINT_EEG. This only works
+    with EEG and requires an equal number of digitization points and sensors.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The measurement info.
+    picks : list of int
+        The channel indices to generate topomap coords for.
+
+    Returns
+    -------
+    locs : array, shape = (n_sensors, 2)
+        An array of positions of the 2 dimensional map.
+    """
+    from scipy.spatial.distance import pdist
+
+    chs = [info['chs'][i] for i in picks]
+
+    # Use channel locations if available
+    locs3d = np.array([ch['loc'][:3] for ch in chs])
+
+    # If electrode locations are not available, use digitization points
+    if len(locs3d) == 0 or np.allclose(locs3d, 0):
+        logging.warning('Did not find any electrode locations in the info, '
+                        'will attempt to use digitization points instead. '
+                        'However, if digitization points do not correspond to '
+                        'the EEG electrodes, this will lead to bad results. '
+                        'Please verify that the sensor locations in the plot '
+                        'are accurate.')
+
+        # MEG/EOG/ECG sensors don't have digitization points; all requested
+        # channels must be EEG
+        for ch in chs:
+            if ch['kind'] != FIFF.FIFFV_EEG_CH:
+                raise ValueError("Cannot determine location of MEG/EOG/ECG "
+                                 "channels using digitization points.")
+
+        eeg_ch_names = [ch['ch_name'] for ch in info['chs']
+                        if ch['kind'] == FIFF.FIFFV_EEG_CH]
+
+        # Get EEG digitization points
+        if info['dig'] is None or len(info['dig']) == 0:
+            raise RuntimeError('No digitization points found.')
+
+        locs3d = np.array([point['r'] for point in info['dig']
+                           if point['kind'] == FIFF.FIFFV_POINT_EEG])
+
+        if len(locs3d) == 0:
+            raise RuntimeError('Did not find any digitization points of '
+                               'kind FIFFV_POINT_EEG (%d) in the info.'
+                               % FIFF.FIFFV_POINT_EEG)
+
+        if len(locs3d) != len(eeg_ch_names):
+            raise ValueError("Number of EEG digitization points (%d) "
+                             "doesn't match the number of EEG channels "
+                             "(%d)" % (len(locs3d), len(eeg_ch_names)))
+
+        # Center digitization points on head origin
+        dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
+                     FIFF.FIFFV_POINT_EEG,
+                     FIFF.FIFFV_POINT_EXTRA)
+        from ..preprocessing.maxfilter import fit_sphere_to_headshape
+        _, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds)
+        origin_head /= 1000.  # to meters
+        locs3d -= origin_head
+
+        # Match the digitization points with the requested
+        # channels.
+        eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
+        locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
+
+    # Duplicate points cause all kinds of trouble during visualization
+    if np.min(pdist(locs3d)) < 1e-10:
+        raise ValueError('Electrode positions must be unique.')
+
+    x, y, z = locs3d.T
+    az, el, r = _cartesian_to_sphere(x, y, z)
+    locs2d = np.c_[_polar_to_cartesian(az, np.pi / 2 - el)]
+    return locs2d
+
+
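+# Note on the projection above (explanatory sketch, not upstream code): the
+# 3D positions are flattened with an azimuthal projection in which a point's
+# polar radius equals its angular distance from the vertex. Assuming
+# _cartesian_to_sphere returns elevation measured from the xy-plane, a sensor
+# directly above the origin maps to (0, 0):
+#
+#     >>> az, el, r = _cartesian_to_sphere(0., 0., 0.09)
+#     >>> _polar_to_cartesian(az, np.pi / 2 - el)  # doctest: +SKIP
+#     (0.0, 0.0)
+
+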
+def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
+    """Find the picks for pairing grad channels
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        An info dictionary containing channel information.
+    layout : Layout | None
+        The layout if available. Defaults to None.
+    topomap_coords : bool
+        Return the coordinates for a topomap plot along with the picks. If
+        False, only picks are returned. Defaults to True.
+    exclude : list of str | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
+
+    Returns
+    -------
+    picks : array of int
+        Picks for the grad channels, ordered in pairs.
+    coords : array, shape = (n_grad_channels, 3)
+        Coordinates for a topomap plot (optional, only returned if
+        topomap_coords == True).
+    """
+    # find all complete pairs of grad channels
+    pairs = defaultdict(list)
+    grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
+    for i in grad_picks:
+        ch = info['chs'][i]
+        name = ch['ch_name']
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(ch)
+    pairs = [p for p in pairs.values() if len(p) == 2]
+    if len(pairs) == 0:
+        raise ValueError("No 'grad' channel pairs found.")
+
+    # find the picks corresponding to the grad channels
+    grad_chs = sum(pairs, [])
+    ch_names = info['ch_names']
+    picks = [ch_names.index(c['ch_name']) for c in grad_chs]
+
+    if topomap_coords:
+        shape = (len(pairs), 2, -1)
+        coords = (_find_topomap_coords(info, picks, layout)
+                  .reshape(shape).mean(axis=1))
+        return picks, coords
+    else:
+        return picks
+
+
+# This function is used to pair grad channels when no info is available,
+# as is the case for Projection objects, which don't carry an info dict.
+def _pair_grad_sensors_from_ch_names(ch_names):
+    """Find the indexes for pairing grad channels
+
+    Parameters
+    ----------
+    ch_names : list of str
+        A list of channel names.
+
+    Returns
+    -------
+    indexes : list of int
+        Indexes of the grad channels, ordered in pairs.
+    """
+    pairs = defaultdict(list)
+    for i, name in enumerate(ch_names):
+        if name.startswith('MEG'):
+            if name.endswith(('2', '3')):
+                key = name[-4:-1]
+                pairs[key].append(i)
+
+    pairs = [p for p in pairs.values() if len(p) == 2]
+
+    grad_chs = sum(pairs, [])
+    return grad_chs
+
+
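+# Illustrative example of the name-based pairing above (not upstream code):
+# Neuromag planar gradiometer names end in 2 and 3, so channels sharing the
+# stem before the final digit are paired, while magnetometers (ending in 1)
+# are left out.
+#
+#     >>> _pair_grad_sensors_from_ch_names(
+#     ...     ['MEG 0111', 'MEG 0112', 'MEG 0113'])
+#     [1, 2]
+
+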
+def _merge_grad_data(data):
+    """Merge data from channel pairs using the RMS
+
+    Parameters
+    ----------
+    data : array, shape = (n_channels, n_times)
+        Data for channels, ordered in pairs.
+
+    Returns
+    -------
+    data : array, shape = (n_channels / 2, n_times)
+        The root mean square for each pair.
+    """
+    data = data.reshape((len(data) // 2, 2, -1))
+    data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
+    return data
+
+
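+# Worked example of the RMS merge above (illustrative, not upstream code): a
+# pair with amplitudes 3 and 4 merges to sqrt((3 ** 2 + 4 ** 2) / 2) ~= 3.54.
+#
+#     >>> _merge_grad_data(np.array([[3.], [4.]]))  # doctest: +SKIP
+#     array([[ 3.53553391]])
+
+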
+def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
+                       ch_indices=None, name='ecog', bg_image=None):
+    """Generate a custom 2D layout from xy points.
+
+    Generates a 2-D layout for plotting with plot_topo methods and
+    functions. XY points will be normalized between 0 and 1, where
+    normalization extremes will be either the min/max of xy, or
+    the width/height of bg_image.
+
+    Parameters
+    ----------
+    xy : ndarray (N x 2)
+        The xy coordinates of sensor locations.
+    w : float
+        The width of each sensor's axis (between 0 and 1)
+    h : float
+        The height of each sensor's axis (between 0 and 1)
+    pad : float
+        Portion of the box to reserve for padding. The value can range from
+        0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
+    ch_names : list
+        The names of each channel. Must be a list of strings, with one
+        string per channel.
+    ch_indices : list
+        Index of each channel - must be a collection of unique integers,
+        one index per channel.
+    name : string
+        The name of this layout type.
+    bg_image : str | ndarray
+        The image over which sensor axes will be plotted. Either a path to an
+        image file, or an array that can be plotted with plt.imshow. If
+        provided, xy points will be normalized by the width/height of this
+        image. If not, xy points will be normalized by their own min/max.
+
+    Returns
+    -------
+    layout : Layout
+        A Layout object that can be plotted with plot_topo
+        functions and methods.
+
+    See Also
+    --------
+    make_eeg_layout, make_grid_layout
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    from scipy.ndimage import imread
+
+    if ch_indices is None:
+        ch_indices = np.arange(xy.shape[0])
+    if ch_names is None:
+        ch_names = ['{0}'.format(i) for i in ch_indices]
+
+    if len(ch_names) != len(ch_indices):
+        raise ValueError('# ch names and indices must be equal')
+    if len(ch_names) != len(xy):
+        raise ValueError('# ch names and xy vals must be equal')
+
+    x, y = xy.copy().astype(float).T
+
+    # Normalize xy to 0-1
+    if bg_image is not None:
+        # Normalize by image dimensions
+        if isinstance(bg_image, str):
+            img = imread(bg_image)
+        else:
+            img = bg_image
+        x /= img.shape[1]
+        y /= img.shape[0]
+    else:
+        # Normalize x and y to the [0, 1] range
+        for i_dim in [x, y]:
+            i_dim -= i_dim.min(0)
+            i_dim /= (i_dim.max(0) - i_dim.min(0))
+
+    # Create box and pos variable
+    box = _box_size(np.vstack([x, y]).T, padding=pad)
+    box = (0, 0, box[0], box[1])
+    w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
+    loc_params = np.vstack([x, y, w, h]).T
+
+    layout = Layout(box, loc_params, ch_names, ch_indices, name)
+    return layout
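+
+
+# Usage sketch for generate_2d_layout (illustrative, not upstream code; the
+# coordinates and channel names are made up):
+#
+#     >>> xy = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
+#     >>> lt = generate_2d_layout(xy, w=.1, h=.08,
+#     ...                         ch_names=['g1', 'g2', 'g3', 'g4'])
+#     >>> lt.pos.shape
+#     (4, 4)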
diff --git a/mne/channels/montage.py b/mne/channels/montage.py
new file mode 100644
index 0000000..b3ac08d
--- /dev/null
+++ b/mne/channels/montage.py
@@ -0,0 +1,533 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#
+# License: Simplified BSD
+
+import os
+import os.path as op
+
+import numpy as np
+
+from ..viz import plot_montage
+from .channels import _contains_ch_type
+from ..transforms import (_sphere_to_cartesian, apply_trans,
+                          get_ras_to_neuromag_trans)
+from ..io.meas_info import _make_dig_points, _read_dig_points
+from ..externals.six import string_types
+from ..externals.six.moves import map
+
+
+class Montage(object):
+    """Montage for EEG cap
+
+    Montages are typically loaded from a file using read_montage. Only use this
+    class directly if you're constructing a new montage.
+
+    Parameters
+    ----------
+    pos : array, shape (n_channels, 3)
+        The positions of the channels in 3d.
+    ch_names : list
+        The channel names.
+    kind : str
+        The type of montage (e.g. 'standard_1005').
+    selection : array of int
+        The indices of the selected channels in the montage file.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    def __init__(self, pos, ch_names, kind, selection):
+        self.pos = pos
+        self.ch_names = ch_names
+        self.kind = kind
+        self.selection = selection
+
+    def __repr__(self):
+        s = '<Montage | %s - %d Channels: %s ...>'
+        s %= self.kind, len(self.ch_names), ', '.join(self.ch_names[:3])
+        return s
+
+    def plot(self, scale_factor=1.5, show_names=False):
+        """Plot EEG sensor montage
+
+        Parameters
+        ----------
+        scale_factor : float
+            Determines the size of the points. Defaults to 1.5
+        show_names : bool
+            Whether to show the channel names. Defaults to False
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            The figure object.
+        """
+        return plot_montage(self, scale_factor=scale_factor,
+                            show_names=show_names)
+
+
+def read_montage(kind, ch_names=None, path=None, unit='m', transform=False):
+    """Read montage from a file
+
+    This function can be used to read electrode positions from a
+    user-specified file using the `kind` and `path` parameters. Alternatively,
+    use only the `kind` parameter to load one of the built-in montages:
+
+    ===================   =====================================================
+    Kind                  description
+    ===================   =====================================================
+    standard_1005         Electrodes are named and positioned according to the
+                          international 10-05 system.
+    standard_1020         Electrodes are named and positioned according to the
+                          international 10-20 system.
+    standard_alphabetic   Electrodes are named with LETTER-NUMBER combinations
+                          (A1, B2, F4, etc.)
+    standard_postfixed    Electrodes are named according to the international
+                          10-20 system using postfixes for intermediate
+                          positions.
+    standard_prefixed     Electrodes are named according to the international
+                          10-20 system using prefixes for intermediate
+                          positions.
+    standard_primed       Electrodes are named according to the international
+                          10-20 system using prime marks (' and '') for
+                          intermediate positions.
+
+    biosemi16             BioSemi cap with 16 electrodes
+    biosemi32             BioSemi cap with 32 electrodes
+    biosemi64             BioSemi cap with 64 electrodes
+    biosemi128            BioSemi cap with 128 electrodes
+    biosemi160            BioSemi cap with 160 electrodes
+    biosemi256            BioSemi cap with 256 electrodes
+
+    easycap-M10           Brainproducts EasyCap with electrodes named
+                          according to the 10-05 system
+    easycap-M1            Brainproducts EasyCap with numbered electrodes
+
+    EGI_256               Geodesic Sensor Net with 256 channels
+
+    GSN-HydroCel-32       HydroCel Geodesic Sensor Net with 32 electrodes
+    GSN-HydroCel-64_1.0   HydroCel Geodesic Sensor Net with 64 electrodes
+    GSN-HydroCel-65_1.0   HydroCel Geodesic Sensor Net with 64 electrodes + Cz
+    GSN-HydroCel-128      HydroCel Geodesic Sensor Net with 128 electrodes
+    GSN-HydroCel-129      HydroCel Geodesic Sensor Net with 128 electrodes + Cz
+    GSN-HydroCel-256      HydroCel Geodesic Sensor Net with 256 electrodes
+    GSN-HydroCel-257      HydroCel Geodesic Sensor Net with 256 electrodes + Cz
+    ===================   =====================================================
+
+    Parameters
+    ----------
+    kind : str
+        The name of the montage file (e.g. kind='easycap-M10' for
+        'easycap-M10.txt'). Files with extensions '.elc', '.txt', '.csd',
+        '.elp', '.hpts' or '.sfp' are supported.
+    ch_names : list of str | None
+        If not all electrodes defined in the montage are present in the EEG
+        data, use this parameter to select a subset of electrode positions to
+        load. If None (default), all defined electrode positions are returned.
+    path : str | None
+        The path of the folder containing the montage file. Defaults to the
+        mne/channels/data/montages folder in your mne-python installation.
+    unit : 'm' | 'cm' | 'mm'
+        Unit of the input file. If not 'm' (default), coordinates will be
+        rescaled to 'm'.
+    transform : bool
+        If True, points will be transformed to Neuromag space.
+        The fiducials ('nasion', 'lpa', 'rpa') must be specified in
+        the montage file. Useful for points captured using Polhemus FastSCAN.
+        Default is False.
+
+    Returns
+    -------
+    montage : instance of Montage
+        The montage.
+
+    Notes
+    -----
+    Built-in montages are not scaled or transformed by default.
+
+    .. versionadded:: 0.9.0
+    """
+
+    if path is None:
+        path = op.join(op.dirname(__file__), 'data', 'montages')
+    if not op.isabs(kind):
+        supported = ('.elc', '.txt', '.csd', '.sfp', '.elp', '.hpts')
+        montages = [op.splitext(f) for f in os.listdir(path)]
+        montages = [m for m in montages if m[1] in supported and kind == m[0]]
+        if len(montages) != 1:
+            raise ValueError('Could not find the montage. Please provide the '
+                             'full path.')
+        kind, ext = montages[0]
+        fname = op.join(path, kind + ext)
+    else:
+        kind, ext = op.splitext(kind)
+        fname = op.join(path, kind + ext)
+
+    if ext == '.sfp':
+        # EGI geodesic
+        dtype = np.dtype('S4, f8, f8, f8')
+        data = np.loadtxt(fname, dtype=dtype)
+        pos = np.c_[data['f1'], data['f2'], data['f3']]
+        ch_names_ = data['f0'].astype(np.str)
+    elif ext == '.elc':
+        # 10-5 system
+        ch_names_ = []
+        pos = []
+        with open(fname) as fid:
+            for line in fid:
+                if 'Positions\n' in line:
+                    break
+            pos = []
+            for line in fid:
+                if 'Labels\n' in line:
+                    break
+                pos.append(list(map(float, line.split())))
+            for line in fid:
+                if not line or not set(line) - set([' ']):
+                    break
+                ch_names_.append(line.strip(' ').strip('\n'))
+        pos = np.array(pos)
+    elif ext == '.txt':
+        # easycap
+        try:  # newer version
+            data = np.genfromtxt(fname, dtype='str', skip_header=1)
+        except TypeError:
+            data = np.genfromtxt(fname, dtype='str', skiprows=1)
+        ch_names_ = list(data[:, 0])
+        theta, phi = data[:, 1].astype(float), data[:, 2].astype(float)
+        x = 85. * np.cos(np.deg2rad(phi)) * np.sin(np.deg2rad(theta))
+        y = 85. * np.sin(np.deg2rad(theta)) * np.sin(np.deg2rad(phi))
+        z = 85. * np.cos(np.deg2rad(theta))
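+        # e.g. (illustrative) theta=90, phi=0 lands on the positive x-axis
+        # of the 85 mm sphere: x=85, y=0, z=0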
+        pos = np.c_[x, y, z]
+    elif ext == '.csd':
+        # CSD toolbox
+        dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
+                 ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
+                 ('off_sph', 'f8')]
+        try:  # newer version
+            table = np.loadtxt(fname, skip_header=2, dtype=dtype)
+        except TypeError:
+            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
+        ch_names_ = table['label']
+        theta = (2 * np.pi * table['theta']) / 360.
+        phi = (2 * np.pi * table['phi']) / 360.
+        pos = _sphere_to_cartesian(theta, phi, r=1.0)
+        pos = np.asarray(pos).T
+    elif ext == '.elp':
+        # standard BESA spherical
+        dtype = np.dtype('S8, S8, f8, f8, f8')
+        try:
+            data = np.loadtxt(fname, dtype=dtype, skip_header=1)
+        except TypeError:
+            data = np.loadtxt(fname, dtype=dtype, skiprows=1)
+
+        az = data['f2']
+        horiz = data['f3']
+
+        radius = np.abs(az / 180.)
+        angles = np.array([90. - h if a >= 0. else -90. - h
+                           for h, a in zip(horiz, az)])
+
+        sph_phi = (0.5 - radius) * 180.
+        sph_theta = angles
+
+        azimuth = sph_theta / 180.0 * np.pi
+        elevation = sph_phi / 180.0 * np.pi
+        r = 85.
+
+        y, x, z = _sphere_to_cartesian(azimuth, elevation, r)
+
+        pos = np.c_[x, y, z]
+        ch_names_ = data['f1'].astype(np.str)
+    elif ext == '.hpts':
+        # MNE-C specified format for generic digitizer data
+        dtype = [('type', 'S8'), ('name', 'S8'),
+                 ('x', 'f8'), ('y', 'f8'), ('z', 'f8')]
+        data = np.loadtxt(fname, dtype=dtype)
+        pos = np.vstack((data['x'], data['y'], data['z'])).T
+        ch_names_ = data['name'].astype(np.str)
+    else:
+        raise ValueError('Currently the "%s" template is not supported.' %
+                         kind)
+    selection = np.arange(len(pos))
+
+    if unit == 'mm':
+        pos /= 1e3
+    elif unit == 'cm':
+        pos /= 1e2
+    elif unit != 'm':
+        raise ValueError("'unit' should be either 'm', 'cm', or 'mm'.")
+    if transform:
+        names_lower = [name.lower() for name in list(ch_names_)]
+        if ext == '.hpts':
+            fids = ('2', '1', '3')  # Alternate cardinal point names
+        else:
+            fids = ('nz', 'lpa', 'rpa')
+
+        missing = [name for name in fids
+                   if name not in names_lower]
+        if missing:
+            raise ValueError("The points %s are missing, but are needed "
+                             "to transform the points to the MNE coordinate "
+                             "system. Either add the points, or read the "
+                             "montage with transform=False." % missing)
+        nasion = pos[names_lower.index(fids[0])]
+        lpa = pos[names_lower.index(fids[1])]
+        rpa = pos[names_lower.index(fids[2])]
+
+        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+        pos = apply_trans(neuromag_trans, pos)
+
+    if ch_names is not None:
+        sel, ch_names_ = zip(*[(i, e) for i, e in enumerate(ch_names_)
+                             if e in ch_names])
+        sel = list(sel)
+        pos = pos[sel]
+        selection = selection[sel]
+    else:
+        ch_names_ = list(ch_names_)
+    kind = op.split(kind)[-1]
+    return Montage(pos=pos, ch_names=ch_names_, kind=kind, selection=selection)
+
+
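+# Usage sketch for read_montage (illustrative, not upstream code; assumes the
+# built-in montage files that ship with this version of mne-python):
+#
+#     >>> montage = read_montage('standard_1020')  # doctest: +SKIP
+#     >>> len(montage.ch_names) == len(montage.pos)  # doctest: +SKIP
+#     True
+
+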
+class DigMontage(object):
+    """Montage for Digitized data
+
+    Montages are typically loaded from a file using read_dig_montage. Only use
+    this class directly if you're constructing a new montage.
+
+    Parameters
+    ----------
+    hsp : array, shape (n_points, 3)
+        The positions of the channels in 3d.
+    hpi : array, shape (n_hpi, 3)
+        The positions of the head-position indicator coils in 3d.
+        These points are in the MEG device space.
+    elp : array, shape (n_hpi, 3)
+        The positions of the head-position indicator coils in 3d.
+        This is typically in the acquisition digitizer space.
+    point_names : list, shape (n_elp)
+        The names of the digitized points for hpi and elp.
+    nasion : array, shape (1, 3)
+        The position of the nasion fiducial point in the RAS head space.
+    lpa : array, shape (1, 3)
+        The position of the left preauricular fiducial point in
+        the RAS head space.
+    rpa : array, shape (1, 3)
+        The position of the right preauricular fiducial point in
+        the RAS head space.
+    dev_head_t : array, shape (4, 4)
+        A Device-to-Head transformation matrix.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    def __init__(self, hsp, hpi, elp, point_names,
+                 nasion=None, lpa=None, rpa=None, dev_head_t=None):
+        self.hsp = hsp
+        self.hpi = hpi
+        self.elp = elp
+        self.point_names = point_names
+
+        self.nasion = nasion
+        self.lpa = lpa
+        self.rpa = rpa
+        if dev_head_t is None:
+            self.dev_head_t = np.identity(4)
+        else:
+            self.dev_head_t = dev_head_t
+
+    def __repr__(self):
+        s = '<DigMontage | %d Dig Points, %d HPI points: %s ...>'
+        s %= (len(self.hsp), len(self.point_names),
+              ', '.join(self.point_names[:3]))
+        return s
+
+    def plot(self, scale_factor=1.5, show_names=False):
+        """Plot EEG sensor montage
+
+        Parameters
+        ----------
+        scale_factor : float
+            Determines the size of the points. Defaults to 1.5
+        show_names : bool
+            Whether to show the channel names. Defaults to False
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            The figure object.
+        """
+        from ..viz import plot_montage
+        return plot_montage(self, scale_factor=scale_factor,
+                            show_names=show_names)
+
+
+def read_dig_montage(hsp=None, hpi=None, elp=None, point_names=None,
+                     unit='mm', transform=True, dev_head_t=False):
+    """Read montage from a file
+
+    Parameters
+    ----------
+    hsp : None | str | array, shape (n_points, 3)
+        If str, this corresponds to the filename of the headshape points.
+        This is typically used with the Polhemus FastSCAN system.
+        If numpy.array, this corresponds to an array of positions of the
+        channels in 3d.
+    hpi : None | str | array, shape (n_hpi, 3)
+        If str, this corresponds to the filename of hpi points. If
+        numpy.array, this corresponds to an array of hpi points. These
+        points are in device space.
+    elp : None | str | array, shape (n_fids + n_hpi, 3)
+        If str, this corresponds to the filename of hpi points.
+        This is typically used with the Polhemus FastSCAN system.
+        If numpy.array, this corresponds to an array of hpi points. These
+        points are in head space. Fiducials should be listed first, then the
+        points corresponding to the hpi coils.
+    point_names : None | list
+        If list, this corresponds to a list of point names. This must be
+        specified if elp is defined.
+    unit : 'm' | 'cm' | 'mm'
+        Unit of the input file. If not 'm', coordinates will be rescaled
+        to 'm'. Default is 'mm'. This is applied only for hsp and elp files.
+    transform : bool
+        If True, points will be transformed to Neuromag space.
+        The fiducials ('nasion', 'lpa', 'rpa') must be specified in
+        the montage file. Useful for points captured using Polhemus FastSCAN.
+        Default is True.
+    dev_head_t : bool
+        If True, a Dev-to-Head transformation matrix will be added to the
+        montage. To get a proper `dev_head_t`, the hpi and the elp points
+        must be in the same order. If False, an identity matrix will be added
+        to the montage. Default is False.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The digitizer montage.
+
+    Notes
+    -----
+    All digitized points will be transformed to head-based coordinate system
+    if transform is True and fiducials are present.
+
+    .. versionadded:: 0.9.0
+    """
+    if isinstance(hsp, string_types):
+        hsp = _read_dig_points(hsp)
+    if hsp is not None:
+        if unit == 'mm':
+            hsp *= 1e-3
+        if unit == 'cm':
+            hsp *= 1e-2
+    if isinstance(hpi, string_types):
+        ext = op.splitext(hpi)[-1]
+        if ext == '.txt':
+            hpi = _read_dig_points(hpi)
+        elif ext in ('.sqd', '.mrk'):
+            from ..io.kit import read_mrk
+            hpi = read_mrk(hpi)
+        else:
+            raise TypeError('HPI file is not supported.')
+    if isinstance(elp, string_types):
+        elp = _read_dig_points(elp)
+    if elp is not None:
+        if len(elp) != len(point_names):
+            raise ValueError("The elp file contains %i points, but %i names "
+                             "were specified." % (len(elp), len(point_names)))
+        if unit == 'mm':
+            elp *= 1e-3
+        elif unit == 'cm':
+            elp *= 1e-2
+
+    if transform:
+        if elp is None:
+            raise ValueError("ELP points are not specified. Points are needed "
+                             "for transformation.")
+        names_lower = [name.lower() for name in point_names]
+
+        # check that all needed points are present
+        missing = tuple(name for name in ('nasion', 'lpa', 'rpa')
+                        if name not in names_lower)
+        if missing:
+            raise ValueError("The points %s are missing, but are needed "
+                             "to transform the points to the MNE coordinate "
+                             "system. Either add the points, or read the "
+                             "montage with transform=False." % str(missing))
+
+        nasion = elp[names_lower.index('nasion')]
+        lpa = elp[names_lower.index('lpa')]
+        rpa = elp[names_lower.index('rpa')]
+        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+
+        fids = np.array([nasion, lpa, rpa])
+        fids = apply_trans(neuromag_trans, fids)
+        elp = apply_trans(neuromag_trans, elp)
+        hsp = apply_trans(neuromag_trans, hsp)
+    else:
+        fids = [None] * 3
+    if dev_head_t:
+        from ..coreg import fit_matched_points
+        trans = fit_matched_points(tgt_pts=elp[3:], src_pts=hpi, out='trans')
+    else:
+        trans = np.identity(4)
+
+    return DigMontage(hsp, hpi, elp, point_names, fids[0], fids[1], fids[2],
+                      trans)
+
+
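+# Usage sketch for read_dig_montage (illustrative, not upstream code; the
+# file names and point names are hypothetical Polhemus/KIT exports):
+#
+#     >>> montage = read_dig_montage(  # doctest: +SKIP
+#     ...     hsp='headshape.txt', hpi='markers.sqd', elp='points.txt',
+#     ...     point_names=['nasion', 'lpa', 'rpa',
+#     ...                  'hpi1', 'hpi2', 'hpi3', 'hpi4', 'hpi5'])
+
+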
+def _set_montage(info, montage):
+    """Apply montage to data.
+
+    With a Montage, this function will replace the EEG channel names and
+    locations with the values specified for the particular montage.
+
+    With a DigMontage, this function will replace the digitizer info with
+    the values specified for the particular montage.
+
+    Note: This function will change the info variable in place.
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement info to update.
+    montage : instance of Montage
+        The montage to apply.
+    """
+    if isinstance(montage, Montage):
+        if not _contains_ch_type(info, 'eeg'):
+            raise ValueError('No EEG channels found.')
+
+        sensors_found = False
+        for pos, ch_name in zip(montage.pos, montage.ch_names):
+            if ch_name not in info['ch_names']:
+                continue
+
+            ch_idx = info['ch_names'].index(ch_name)
+            info['ch_names'][ch_idx] = ch_name
+            info['chs'][ch_idx]['loc'] = np.r_[pos, [0.] * 9]
+            sensors_found = True
+
+        if not sensors_found:
+            raise ValueError('None of the sensors defined in the montage were '
+                             'found in the info structure. Check the channel '
+                             'names.')
+    elif isinstance(montage, DigMontage):
+        dig = _make_dig_points(nasion=montage.nasion, lpa=montage.lpa,
+                               rpa=montage.rpa, hpi=montage.hpi,
+                               dig_points=montage.hsp)
+        info['dig'] = dig
+        info['dev_head_t']['trans'] = montage.dev_head_t
+    else:
+        raise TypeError("Montage must be a 'Montage' or 'DigMontage' "
+                        "instead of '%s'." % type(montage))
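+
+
+# Usage sketch for _set_montage (illustrative, not upstream code; uses mne's
+# public create_info helper to build a minimal EEG info):
+#
+#     >>> from mne import create_info
+#     >>> info = create_info(['Fp1', 'AF7', 'AF3'], 1000., 'eeg')
+#     >>> _set_montage(info, read_montage('standard_1020'))  # doctest: +SKIP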
diff --git a/doc/sphinxext/numpy_ext_old/__init__.py b/mne/channels/tests/__init__.py
similarity index 100%
copy from doc/sphinxext/numpy_ext_old/__init__.py
copy to mne/channels/tests/__init__.py
diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py
new file mode 100644
index 0000000..3a37858
--- /dev/null
+++ b/mne/channels/tests/test_channels.py
@@ -0,0 +1,152 @@
+# Author: Daniel G Wakeman <dwakeman at nmr.mgh.harvard.edu>
+#         Denis A. Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_array_equal
+from nose.tools import assert_raises, assert_true, assert_equal
+
+from mne.channels import rename_channels, read_ch_connectivity
+from mne.channels.channels import _ch_neighbor_connectivity
+from mne.io import read_info, Raw
+from mne.io.constants import FIFF
+from mne.fixes import partial, savemat
+from mne.utils import _TempDir, run_tests_if_main
+from mne import pick_types
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+
+def test_rename_channels():
+    """Test rename channels
+    """
+    info = read_info(raw_fname)
+    # Error Tests
+    # Test channel name exists in ch_names
+    mapping = {'EEG 160': 'EEG060'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test improper mapping configuration
+    mapping = {'MEG 2641': 1.0}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test non-unique mapping configuration
+    mapping = {'MEG 2641': 'MEG 2642'}
+    assert_raises(ValueError, rename_channels, info, mapping)
+    # Test bad input
+    assert_raises(ValueError, rename_channels, info, 1.)
+
+    # Test successful changes
+    # Test ch_name and ch_names are changed
+    info2 = deepcopy(info)  # for consistency at the start of each test
+    info2['bads'] = ['EEG 060', 'EOG 061']
+    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
+    rename_channels(info2, mapping)
+    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
+    assert_true(info2['ch_names'][374] == 'EEG060')
+    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
+    assert_true(info2['ch_names'][375] == 'EOG061')
+    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
+    info2 = deepcopy(info)
+    rename_channels(info2, lambda x: x.replace(' ', ''))
+    assert_true(info2['chs'][373]['ch_name'] == 'EEG059')
+    info2 = deepcopy(info)
+    info2['bads'] = ['EEG 060', 'EEG 060']
+    rename_channels(info2, mapping)
+    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
+
+
+def test_set_channel_types():
+    """Test set_channel_types
+    """
+    raw = Raw(raw_fname)
+    # Error Tests
+    # Test channel name exists in ch_names
+    mapping = {'EEG 160': 'EEG060'}
+    assert_raises(ValueError, raw.set_channel_types, mapping)
+    # Test change to illegal channel type
+    mapping = {'EOG 061': 'xxx'}
+    assert_raises(ValueError, raw.set_channel_types, mapping)
+    # Test type change
+    raw2 = Raw(raw_fname)
+    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
+    mapping = {'EEG 060': 'eog', 'EEG 059': 'ecg', 'EOG 061': 'seeg'}
+    raw2.set_channel_types(mapping)
+    info = raw2.info
+    assert_true(info['chs'][374]['ch_name'] == 'EEG 060')
+    assert_true(info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH)
+    assert_true(info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE)
+    assert_true(info['chs'][373]['ch_name'] == 'EEG 059')
+    assert_true(info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH)
+    assert_true(info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE)
+    assert_true(info['chs'][375]['ch_name'] == 'EOG 061')
+    assert_true(info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH)
+    assert_true(info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V)
+    assert_true(info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG)
+
+
+def test_read_ch_connectivity():
+    "Test reading channel connectivity templates"
+    tempdir = _TempDir()
+    a = partial(np.array, dtype='<U7')
+    # no pep8
+    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
+                     (['MEG0121'], [[a(['MEG0111'])],
+                                    [a(['MEG0131'])]]),
+                     (['MEG0131'], [[a(['MEG0111'])],
+                                    [a(['MEG0121'])]])]],
+                   dtype=[('label', 'O'), ('neighblabel', 'O')])
+    mat = dict(neighbours=nbh)
+    mat_fname = op.join(tempdir, 'test_mat.mat')
+    savemat(mat_fname, mat, oned_as='row')
+
+    ch_connectivity, ch_names = read_ch_connectivity(mat_fname)
+    x = ch_connectivity
+    assert_equal(x.shape[0], len(ch_names))
+    assert_equal(x.shape, (3, 3))
+    assert_equal(x[0, 1], False)
+    assert_equal(x[0, 2], True)
+    assert_true(np.all(x.diagonal()))
+    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
+    ch_connectivity, ch_names = read_ch_connectivity(mat_fname, picks=[0, 2])
+    assert_equal(ch_connectivity.shape[0], 2)
+    assert_equal(len(ch_names), 2)
+
+    ch_names = ['EEG01', 'EEG02', 'EEG03']
+    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
+    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names[:2],
+                  neighbors)
+    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
+    assert_raises(ValueError, _ch_neighbor_connectivity, ch_names, neighbors)
+    connectivity, ch_names = read_ch_connectivity('neuromag306mag')
+    assert_equal(connectivity.shape, (102, 102))
+    assert_equal(len(ch_names), 102)
+    assert_raises(ValueError, read_ch_connectivity, 'bananas!')
+
+
+def test_get_set_sensor_positions():
+    """Test get/set functions for sensor positions
+    """
+    raw1 = Raw(raw_fname)
+    picks = pick_types(raw1.info, meg=False, eeg=True)
+    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
+    raw_pos = raw1._get_channel_positions(picks=picks)
+    assert_array_equal(raw_pos, pos)
+
+    ch_name = raw1.info['ch_names'][13]
+    assert_raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
+    raw2 = Raw(raw_fname)
+    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
+    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
+    assert_array_equal(raw1.info['chs'][13]['loc'],
+                       raw2.info['chs'][13]['loc'])
+
+run_tests_if_main()
diff --git a/mne/channels/tests/test_interpolation.py b/mne/channels/tests/test_interpolation.py
new file mode 100644
index 0000000..2b2a881
--- /dev/null
+++ b/mne/channels/tests/test_interpolation.py
@@ -0,0 +1,120 @@
+import os.path as op
+import numpy as np
+from numpy.testing import (assert_allclose, assert_array_equal)
+from nose.tools import assert_raises, assert_equal, assert_true
+
+from mne import io, pick_types, pick_channels, read_events, Epochs
+from mne.channels.interpolation import _make_interpolation_matrix
+from mne.utils import run_tests_if_main, slow_test
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+
+
+def _load_data():
+    """Helper function to load data."""
+    # It is more memory efficient to load data in a separate
+    # function so it's loaded on-demand
+    raw = io.Raw(raw_fname, add_eeg_ref=False)
+    events = read_events(event_name)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude=[])
+    # select every second channel for speed but compensate by using
+    # mode='accurate'.
+    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude=[])[1::2]
+    picks = pick_types(raw.info, meg=True, eeg=True, exclude=[])
+
+    epochs_eeg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_eeg,
+                        preload=True, reject=dict(eeg=80e-6))
+    epochs_meg = Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,
+                        preload=True, reject=dict(grad=1000e-12, mag=4e-12))
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=dict(eeg=80e-6, grad=1000e-12,
+                                              mag=4e-12))
+    return raw, epochs, epochs_eeg, epochs_meg
+
+
+ at slow_test
+def test_interpolation():
+    """Test interpolation"""
+    raw, epochs, epochs_eeg, epochs_meg = _load_data()
+
+    # It's a trade-off between speed and accuracy. If every second channel is
+    # selected, the tests are more than 3x faster but the correlation
+    # drops to 0.8.
+    thresh = 0.80
+
+    # create good and bad channels for EEG
+    epochs_eeg.info['bads'] = []
+    goods_idx = np.ones(len(epochs_eeg.ch_names), dtype=bool)
+    goods_idx[epochs_eeg.ch_names.index('EEG 012')] = False
+    bads_idx = ~goods_idx
+
+    evoked_eeg = epochs_eeg.average()
+    ave_before = evoked_eeg.data[bads_idx]
+
+    # interpolate bad channels for EEG
+    pos = epochs_eeg._get_channel_positions()
+    pos_good = pos[goods_idx]
+    pos_bad = pos[bads_idx]
+    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
+    assert_equal(interpolation.shape, (1, len(epochs_eeg.ch_names) - 1))
+    ave_after = np.dot(interpolation, evoked_eeg.data[goods_idx])
+
+    epochs_eeg.info['bads'] = ['EEG 012']
+    evoked_eeg = epochs_eeg.average()
+    assert_array_equal(ave_after, evoked_eeg.interpolate_bads().data[bads_idx])
+
+    assert_allclose(ave_before, ave_after, atol=2e-6)
+
+    # check that interpolation fails when preload is False
+    epochs_eeg.preload = False
+    assert_raises(ValueError, epochs_eeg.interpolate_bads)
+    epochs_eeg.preload = True
+
+    # check that interpolation changes the data in raw
+    raw_eeg = io.RawArray(data=epochs_eeg._data[0], info=epochs_eeg.info)
+    raw_before = raw_eeg._data[bads_idx]
+    raw_after = raw_eeg.interpolate_bads()._data[bads_idx]
+    assert_equal(np.all(raw_before == raw_after), False)
+
+    # check that interpolation fails when preload is False
+    for inst in [raw, epochs]:
+        assert hasattr(inst, 'preload')
+        inst.preload = False
+        inst.info['bads'] = [inst.ch_names[1]]
+        assert_raises(ValueError, inst.interpolate_bads)
+
+    # check that interpolation works for MEG
+    epochs_meg.info['bads'] = ['MEG 0141']
+    evoked = epochs_meg.average()
+    pick = pick_channels(epochs_meg.info['ch_names'], epochs_meg.info['bads'])
+
+    # MEG -- raw
+    raw_meg = io.RawArray(data=epochs_meg._data[0], info=epochs_meg.info)
+    raw_meg.info['bads'] = ['MEG 0141']
+    data1 = raw_meg[pick, :][0][0]
+    # reset_bads=False here because epochs_meg appears to share the same info
+    # dict with raw and we want to test the epochs functionality too
+    data2 = raw_meg.interpolate_bads(reset_bads=False)[pick, :][0][0]
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+    # reset_bads=False should leave the single bad channel still marked
+    assert_true(len(raw_meg.info['bads']) == 1)
+
+    # MEG -- epochs
+    data1 = epochs_meg.get_data()[:, pick, :].ravel()
+    epochs_meg.interpolate_bads()
+    data2 = epochs_meg.get_data()[:, pick, :].ravel()
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+    assert_true(len(raw_meg.info['bads']) == 0)
+
+    # MEG -- evoked
+    data1 = evoked.data[pick]
+    data2 = evoked.interpolate_bads().data[pick]
+    assert_true(np.corrcoef(data1, data2)[0, 1] > thresh)
+
+run_tests_if_main()
diff --git a/mne/channels/tests/test_layout.py b/mne/channels/tests/test_layout.py
new file mode 100644
index 0000000..ccc388d
--- /dev/null
+++ b/mne/channels/tests/test_layout.py
@@ -0,0 +1,380 @@
+from __future__ import print_function
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: Simplified BSD
+
+import copy
+import os.path as op
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
+from nose.tools import assert_true, assert_raises
+from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
+                          find_layout)
+from mne.channels.layout import (_box_size, _auto_topomap_coords,
+                                 generate_2d_layout)
+from mne.utils import run_tests_if_main
+from mne import pick_types, pick_info
+from mne.io import Raw, read_raw_kit
+from mne.io.meas_info import _empty_info
+from mne.io.constants import FIFF
+from mne.preprocessing.maxfilter import fit_sphere_to_headshape
+from mne.utils import _TempDir
+
+warnings.simplefilter('always')
+
+fif_fname = op.join(op.dirname(__file__), '..', '..', 'io',
+                    'tests', 'data', 'test_raw.fif')
+
+lout_path = op.join(op.dirname(__file__), '..', '..', 'io',
+                    'tests', 'data')
+
+bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
+                  'tests', 'data')
+
+fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                        'data', 'test_ctf_comp_raw.fif')
+
+fname_kit_157 = op.join(op.dirname(__file__), '..', '..',  'io', 'kit',
+                        'tests', 'data', 'test.sqd')
+
+test_info = _empty_info()
+test_info.update({
+    'ch_names': ['ICA 001', 'ICA 002', 'EOG 061'],
+    'chs': [{'cal': 1,
+             'ch_name': 'ICA 001',
+             'coil_type': 0,
+             'coord_Frame': 0,
+             'kind': 502,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 1,
+             'range': 1.0,
+             'scanno': 1,
+             'unit': -1,
+             'unit_mul': 0},
+            {'cal': 1,
+             'ch_name': 'ICA 002',
+             'coil_type': 0,
+             'coord_Frame': 0,
+             'kind': 502,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 2,
+             'range': 1.0,
+             'scanno': 2,
+             'unit': -1,
+             'unit_mul': 0},
+            {'cal': 0.002142000012099743,
+             'ch_name': 'EOG 061',
+             'coil_type': 1,
+             'coord_frame': 0,
+             'kind': 202,
+             'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
+                             dtype=np.float32),
+             'logno': 61,
+             'range': 1.0,
+             'scanno': 376,
+             'unit': 107,
+             'unit_mul': 0}],
+    'nchan': 3})
+
+
+def test_io_layout_lout():
+    """Test IO with .lout files"""
+    tempdir = _TempDir()
+    layout = read_layout('Vectorview-all', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lout'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_true(layout.names, layout_read.names)
+
+    print(layout)  # test repr
+
+
+def test_io_layout_lay():
+    """Test IO with .lay files"""
+    tempdir = _TempDir()
+    layout = read_layout('CTF151', scale=False)
+    layout.save(op.join(tempdir, 'foobar.lay'))
+    layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
+                              scale=False)
+    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
+    assert_true(layout.names, layout_read.names)
+
+
+def test_auto_topomap_coords():
+    """Test mapping of coordinates in 3D space to 2D"""
+    info = Raw(fif_fname).info.copy()
+    picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
+
+    # Remove extra digitization point, so EEG digitization points match up
+    # with the EEG channels
+    del info['dig'][85]
+
+    # Remove head origin from channel locations, so mapping with digitization
+    # points yields the same result
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
+                 FIFF.FIFFV_POINT_EEG,
+                 FIFF.FIFFV_POINT_EXTRA)
+    _, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds)
+    origin_head /= 1000.  # to meters
+    for ch in info['chs']:
+        ch['loc'][:3] -= origin_head
+
+    # Use channel locations
+    l0 = _auto_topomap_coords(info, picks)
+
+    # Remove electrode position information, use digitization points from now
+    # on.
+    for ch in info['chs']:
+        ch['loc'].fill(0)
+
+    l1 = _auto_topomap_coords(info, picks)
+    assert_allclose(l1, l0, atol=1e-3)
+
+    # Test plotting mag topomap without channel locations: it should fail
+    mag_picks = pick_types(info, meg='mag')
+    assert_raises(ValueError, _auto_topomap_coords, info, mag_picks)
+
+    # Test function with too many EEG digitization points: it should fail
+    info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Test function with too few EEG digitization points: it should fail
+    info['dig'] = info['dig'][:-2]
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Electrode positions must be unique
+    info['dig'].append(info['dig'][-1])
+    assert_raises(ValueError, _auto_topomap_coords, info, picks)
+
+    # Test function without EEG digitization points: it should fail
+    info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+
+    # Test function without any digitization points, it should fail
+    info['dig'] = None
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+    info['dig'] = []
+    assert_raises(RuntimeError, _auto_topomap_coords, info, picks)
+
+
+def test_make_eeg_layout():
+    """Test creation of EEG layout"""
+    tempdir = _TempDir()
+    tmp_name = 'foo'
+    lout_name = 'test_raw'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    info = Raw(fif_fname).info
+    info['bads'].append(info['ch_names'][360])
+    layout = make_eeg_layout(info, exclude=[])
+    assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
+                                               if ch.startswith('EE')]))
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_allclose(layout.pos, lout_new.pos, atol=0.1)
+    assert_array_equal(lout_orig.names, lout_new.names)
+
+    # Test input validation
+    assert_raises(ValueError, make_eeg_layout, info, radius=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, radius=0.6)
+    assert_raises(ValueError, make_eeg_layout, info, width=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, width=1.1)
+    assert_raises(ValueError, make_eeg_layout, info, height=-0.1)
+    assert_raises(ValueError, make_eeg_layout, info, height=1.1)
+
+
+def test_make_grid_layout():
+    """Test creation of grid layout"""
+    tempdir = _TempDir()
+    tmp_name = 'bar'
+    lout_name = 'test_ica'
+    lout_orig = read_layout(kind=lout_name, path=lout_path)
+    layout = make_grid_layout(test_info)
+    layout.save(op.join(tempdir, tmp_name + '.lout'))
+    lout_new = read_layout(kind=tmp_name, path=tempdir)
+    assert_array_equal(lout_new.kind, tmp_name)
+    assert_array_equal(lout_orig.pos, lout_new.pos)
+    assert_array_equal(lout_orig.names, lout_new.names)
+
+    # Test creating grid layout with specified number of columns
+    layout = make_grid_layout(test_info, n_col=2)
+    # Vertical positions should be equal
+    assert_true(layout.pos[0, 1] == layout.pos[1, 1])
+    # Horizontal positions should be unequal
+    assert_true(layout.pos[0, 0] != layout.pos[1, 0])
+    # Box sizes should be equal
+    assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
+
+
+def test_find_layout():
+    """Test finding layout"""
+    assert_raises(ValueError, find_layout, test_info, ch_type='meep')
+
+    sample_info = Raw(fif_fname).info
+    grads = pick_types(sample_info, meg='grad')
+    sample_info2 = pick_info(sample_info, grads)
+
+    mags = pick_types(sample_info, meg='mag')
+    sample_info3 = pick_info(sample_info, mags)
+
+    # mock new convention
+    sample_info4 = copy.deepcopy(sample_info)
+    for ii, name in enumerate(sample_info4['ch_names']):
+        new = name.replace(' ', '')
+        sample_info4['ch_names'][ii] = new
+        sample_info4['chs'][ii]['ch_name'] = new
+
+    eegs = pick_types(sample_info, meg=False, eeg=True)
+    sample_info5 = pick_info(sample_info, eegs)
+
+    lout = find_layout(sample_info, ch_type=None)
+    assert_true(lout.kind == 'Vectorview-all')
+    assert_true(all(' ' in k for k in lout.names))
+
+    lout = find_layout(sample_info2, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    # test new vector-view
+    lout = find_layout(sample_info4, ch_type=None)
+    assert_true(lout.kind == 'Vectorview-all')
+    assert_true(all(' ' not in k for k in lout.names))
+
+    lout = find_layout(sample_info, ch_type='grad')
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2)
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2, ch_type='grad')
+    assert_true(lout.kind == 'Vectorview-grad')
+    lout = find_layout(sample_info2, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    lout = find_layout(sample_info, ch_type='mag')
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3)
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3, ch_type='mag')
+    assert_true(lout.kind == 'Vectorview-mag')
+    lout = find_layout(sample_info3, ch_type='meg')
+    assert_true(lout.kind == 'Vectorview-all')
+
+    lout = find_layout(sample_info, ch_type='eeg')
+    assert_true(lout.kind == 'EEG')
+    lout = find_layout(sample_info5)
+    assert_true(lout.kind == 'EEG')
+    lout = find_layout(sample_info5, ch_type='eeg')
+    assert_true(lout.kind == 'EEG')
+    # no common layout, 'meg' option not supported
+
+    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
+    lout = find_layout(Raw(fname_bti_raw).info)
+    assert_true(lout.kind == 'magnesWH3600')
+
+    lout = find_layout(Raw(fname_ctf_raw).info)
+    assert_true(lout.kind == 'CTF-275')
+
+    lout = find_layout(read_raw_kit(fname_kit_157).info)
+    assert_true(lout.kind == 'KIT-157')
+
+
+def test_box_size():
+    """Test calculation of box sizes."""
+    # No points. Box size should be 1,1.
+    assert_allclose(_box_size([]), (1.0, 1.0))
+
+    # Create one point. Box size should be 1,1.
+    point = [(0, 0)]
+    assert_allclose(_box_size(point), (1.0, 1.0))
+
+    # Create two points. Box size should be 0.5,1.
+    points = [(0.25, 0.5), (0.75, 0.5)]
+    assert_allclose(_box_size(points), (0.5, 1.0))
+
+    # Create three points. Box size should be (0.5, 0.5).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points), (0.5, 0.5))
+
+    # Create a grid of points. Box size should be (0.1, 0.1).
+    x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
+    x, y = x.ravel(), y.ravel()
+    assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
+
+    # Create a random set of points. This should never break the function.
+    rng = np.random.RandomState(42)
+    points = rng.rand(100, 2)
+    width, height = _box_size(points)
+    assert_true(width is not None)
+    assert_true(height is not None)
+
+    # Test specifying an existing width.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
+
+    # Test specifying an existing width that has influence on the calculated
+    # height.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
+
+    # Test specifying an existing height.
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
+
+    # Test specifying an existing height that has influence on the calculated
+    # width.
+    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
+    assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
+
+    # Test specifying both width and height. The function should simply return
+    # these.
+    points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
+    assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
+
+    # Test specifying a width that will cause unfixable horizontal overlap and
+    # essentially breaks the function (height will be 0).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_array_equal(_box_size(points, width=1), (1, 0))
+
+    # Test adding some padding.
+    # Create three points. Box size should be a little less than (0.5, 0.5).
+    points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
+    assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
+
+
+def test_generate_2d_layout():
+    """Test creation of a layout from 2d points."""
+    snobg = 10
+    sbg = 15
+    side = range(snobg)
+    bg_image = np.random.randn(sbg, sbg)
+    w, h = [.2, .5]
+
+    # Generate fake data
+    xy = np.array([(i, j) for i in side for j in side])
+    lt = generate_2d_layout(xy, w=w, h=h)
+
+    # Correct points ordering / minmaxing
+    comp_1, comp_2 = [(5, 0), (7, 0)]
+    assert_true(lt.pos[:, :2].max() == 1)
+    assert_true(lt.pos[:, :2].min() == 0)
+    with np.errstate(invalid='ignore'):  # divide by zero
+        assert_allclose(xy[comp_2] / float(xy[comp_1]),
+                        lt.pos[comp_2] / float(lt.pos[comp_1]))
+    assert_allclose(lt.pos[0, [2, 3]], [w, h])
+
+    # Correct number elements
+    assert_true(lt.pos.shape[1] == 4)
+    assert_true(len(lt.box) == 4)
+
+    # Make sure background image normalizing is correct
+    lt_bg = generate_2d_layout(xy, bg_image=bg_image)
+    assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
+
+run_tests_if_main()
diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py
new file mode 100644
index 0000000..23da88f
--- /dev/null
+++ b/mne/channels/tests/test_montage.py
@@ -0,0 +1,209 @@
+# Author: Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+from nose.tools import assert_equal
+
+import numpy as np
+from numpy.testing import (assert_array_equal, assert_almost_equal,
+                           assert_allclose, assert_array_almost_equal)
+
+from mne.channels.montage import read_montage, _set_montage, read_dig_montage
+from mne.utils import _TempDir
+from mne import create_info, EvokedArray
+from mne.coreg import fit_matched_points
+from mne.transforms import apply_trans, get_ras_to_neuromag_trans
+from mne.io.constants import FIFF
+from mne.io.meas_info import _read_dig_points
+from mne.io.kit import read_mrk
+
+
+p_dir = op.dirname(__file__)
+elp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_elp.txt')
+hsp = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_hsp.txt')
+hpi = op.join(p_dir, '..', '..', 'io', 'kit', 'tests', 'data', 'test_mrk.sqd')
+
+
+def test_montage():
+    """Test making montages"""
+    tempdir = _TempDir()
+    # no pep8
+    input_str = ["""FidNz 0.00000 10.56381 -2.05108
+    FidT9 -7.82694 0.45386 -3.76056
+    FidT10 7.82694 0.45386 -3.76056""",
+    """// MatLab   Sphere coordinates [degrees]         Cartesian coordinates
+    // Label       Theta       Phi    Radius         X         Y         Z       off sphere surface
+      E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011
+      E2      44.600      -0.880       1.000    0.7119    0.7021   -0.0154   0.00000000000000000
+      E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000""",  # noqa
+    """# ASA electrode file
+    ReferenceLabel  avg
+    UnitPosition    mm
+    NumberPositions=    68
+    Positions
+    -86.0761 -19.9897 -47.9860
+    85.7939 -20.0093 -48.0310
+    0.0083 86.8110 -39.9830
+    Labels
+    LPA
+    RPA
+    Nz
+    """,
+    """Site  Theta  Phi
+    Fp1  -92    -72
+    Fp2   92     72
+    F3   -60    -51
+    """,
+    """346
+     EEG	      F3	 -62.027	 -50.053	      85
+     EEG	      Fz	  45.608	      90	      85
+     EEG	      F4	   62.01	  50.103	      85
+    """,
+    """
+    eeg Fp1 -95.0 -31.0 -3.0
+    eeg AF7 -81 -59 -3
+    eeg AF3 -87 -41 28
+    """]
+    kinds = ['test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp',
+             'test.hpts']
+    for kind, text in zip(kinds, input_str):
+        fname = op.join(tempdir, kind)
+        with open(fname, 'w') as fid:
+            fid.write(text)
+        montage = read_montage(fname)
+        assert_equal(len(montage.ch_names), 3)
+        assert_equal(len(montage.ch_names), len(montage.pos))
+        assert_equal(montage.pos.shape, (3, 3))
+        assert_equal(montage.kind, op.splitext(kind)[0])
+        if kind.endswith('csd'):
+            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
+                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
+                     ('off_sph', 'f8')]
+            # np.loadtxt takes 'skiprows' ('skip_header' is genfromtxt's)
+            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
+            pos2 = np.c_[table['x'], table['y'], table['z']]
+            assert_array_almost_equal(pos2, montage.pos, 4)
+    # test transform
+    input_str = """
+    eeg Fp1 -95.0 -31.0 -3.0
+    eeg AF7 -81 -59 -3
+    eeg AF3 -87 -41 28
+    cardinal 2 -91 0 -42
+    cardinal 1 0 -91 -42
+    cardinal 3 0 91 -42
+    """
+    kind = 'test_fid.hpts'
+    fname = op.join(tempdir, kind)
+    with open(fname, 'w') as fid:
+        fid.write(input_str)
+    montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
+    # check coordinate transformation
+    pos = np.array([-95.0, -31.0, -3.0])
+    nasion = np.array([-91, 0, -42])
+    lpa = np.array([0, -91, -42])
+    rpa = np.array([0, 91, -42])
+    fids = np.vstack((nasion, lpa, rpa))
+    trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
+    pos = apply_trans(trans, pos)
+    assert_array_equal(montage.pos[0], pos)
+    idx = montage.ch_names.index('2')
+    assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
+    idx = montage.ch_names.index('1')
+    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
+    idx = montage.ch_names.index('3')
+    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
+    pos = np.array([-95.0, -31.0, -3.0])
+    montage_fname = op.join(tempdir, 'test_fid.hpts')
+    montage = read_montage(montage_fname, unit='mm')
+    assert_array_equal(montage.pos[0], pos * 1e-3)
+
+    # test applying the montage to Info and Evoked instances
+    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
+    _set_montage(info, montage)
+    pos2 = np.array([c['loc'][:3] for c in info['chs']])
+    assert_array_equal(pos2, montage.pos)
+    assert_equal(montage.ch_names, info['ch_names'])
+
+    info = create_info(
+        montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
+
+    evoked = EvokedArray(
+        data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)
+    evoked.set_montage(montage)
+    pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
+    assert_array_equal(pos3, montage.pos)
+    assert_equal(montage.ch_names, evoked.info['ch_names'])
+
+
+def test_read_dig_montage():
+    """Test read_dig_montage"""
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=False)
+    elp_points = _read_dig_points(elp)
+    hsp_points = _read_dig_points(hsp)
+    hpi_points = read_mrk(hpi)
+    assert_equal(montage.point_names, names)
+    assert_array_equal(montage.elp, elp_points)
+    assert_array_equal(montage.hsp, hsp_points)
+    assert_array_equal(montage.hpi, hpi_points)
+    assert_array_equal(montage.dev_head_t, np.identity(4))
+    montage = read_dig_montage(hsp, hpi, elp, names,
+                               transform=True, dev_head_t=True)
+    # check coordinate transformation
+    # nasion
+    assert_almost_equal(montage.elp[0, 0], 0)
+    assert_almost_equal(montage.nasion[0], 0)
+    assert_almost_equal(montage.elp[0, 2], 0)
+    assert_almost_equal(montage.nasion[2], 0)
+    # lpa and rpa
+    assert_allclose(montage.elp[1:3, 1:], 0, atol=1e-16)
+    assert_allclose(montage.lpa[1:], 0, atol=1e-16)
+    assert_allclose(montage.rpa[1:], 0, atol=1e-16)
+    # device head transform
+    dev_head_t = fit_matched_points(tgt_pts=montage.elp[3:],
+                                    src_pts=montage.hpi, out='trans')
+    assert_array_equal(montage.dev_head_t, dev_head_t)
+
+
+def test_set_dig_montage():
+    """Test applying DigMontage to inst
+
+    Extensive testing of applying `dig` to info is done in test_meas_info
+    with `test_make_dig_points`.
+    """
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    hsp_points = _read_dig_points(hsp)
+    elp_points = _read_dig_points(elp)
+    hpi_points = read_mrk(hpi)
+    p0, p1, p2 = elp_points[:3]
+    nm_trans = get_ras_to_neuromag_trans(p0, p1, p2)
+    elp_points = apply_trans(nm_trans, elp_points)
+    nasion_point, lpa_point, rpa_point = elp_points[:3]
+    hsp_points = apply_trans(nm_trans, hsp_points)
+
+    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=True)
+    info = create_info(['Test Ch'], 1e3, ['eeg'])
+    _set_montage(info, montage)
+    hs = np.array([p['r'] for p in info['dig']
+                   if p['kind'] == FIFF.FIFFV_POINT_EXTRA])
+    nasion_dig = np.array([p['r'] for p in info['dig']
+                           if all([p['ident'] == FIFF.FIFFV_POINT_NASION,
+                                   p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    lpa_dig = np.array([p['r'] for p in info['dig']
+                        if all([p['ident'] == FIFF.FIFFV_POINT_LPA,
+                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    rpa_dig = np.array([p['r'] for p in info['dig']
+                        if all([p['ident'] == FIFF.FIFFV_POINT_RPA,
+                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
+    hpi_dig = np.array([p['r'] for p in info['dig']
+                        if p['kind'] == FIFF.FIFFV_POINT_HPI])
+    assert_array_equal(hs, hsp_points)
+    assert_array_equal(nasion_dig.ravel(), nasion_point)
+    assert_array_equal(lpa_dig.ravel(), lpa_point)
+    assert_array_equal(rpa_dig.ravel(), rpa_point)
+    assert_array_equal(hpi_dig, hpi_points)
+    assert_array_equal(montage.dev_head_t, info['dev_head_t']['trans'])
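
The montage tests above go through the private _set_montage helper; the same
round trip through the public API only, assuming the standard montages
shipped with mne (here 'standard_1020') are available:

    import numpy as np
    from mne import create_info, EvokedArray
    from mne.channels import read_montage

    montage = read_montage('standard_1020')  # built-in montage, by name
    info = create_info(montage.ch_names, 1000.,
                       ['eeg'] * len(montage.ch_names))
    evoked = EvokedArray(np.zeros((len(montage.ch_names), 1)), info, tmin=0.)
    evoked.set_montage(montage)  # writes positions to info['chs'][i]['loc']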
diff --git a/mne/chpi.py b/mne/chpi.py
new file mode 100644
index 0000000..13e4bf3
--- /dev/null
+++ b/mne/chpi.py
@@ -0,0 +1,440 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from os import path as op
+from scipy import linalg
+
+from .io.pick import pick_types, pick_channels
+from .io.base import _BaseRaw
+from .io.constants import FIFF
+from .forward import (_magnetic_dipole_field_vec, _create_meg_coils,
+                      _concatenate_coils)
+from .cov import make_ad_hoc_cov, _get_whitener_data
+from .transforms import apply_trans, invert_transform
+from .utils import verbose, logger, check_version
+from .fixes import partial
+from .externals.six import string_types
+
+
+# ############################################################################
+# Reading from text or FIF file
+
+@verbose
+def get_chpi_positions(raw, t_step=None, verbose=None):
+    """Extract head positions
+
+    Note that the raw instance must have CHPI channels recorded.
+
+    Parameters
+    ----------
+    raw : instance of Raw | str
+        Raw instance to extract the head positions from. Can also be a
+        path to a Maxfilter log file (str).
+    t_step : float | None
+        Sampling interval to use when converting data. If None, it will
+        be automatically determined. By default, a sampling interval of
+        1 second is used if processing a raw data. If processing a
+        Maxfilter log file, this must be None because the log file
+        itself will determine the sampling interval.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The digitized HPI head frame Y is related to the device frame
+    position X as:
+
+        Y = np.dot(rotation, X) + translation
+
+    Note that if a Maxfilter log file is being processed, the start time
+    may not use the same reference point as the rest of mne-python (i.e.,
+    it could be referenced relative to raw.first_samp or something else).
+    """
+    if isinstance(raw, _BaseRaw):
+        # for simplicity, we'll sample at 1 sec intervals like maxfilter
+        if t_step is None:
+            t_step = 1.0
+        t_step = float(t_step)
+        picks = pick_types(raw.info, meg=False, ref_meg=False,
+                           chpi=True, exclude=[])
+        if len(picks) == 0:
+            raise RuntimeError('raw file has no CHPI channels')
+        time_idx = raw.time_as_index(np.arange(0, raw.times[-1], t_step))
+        data = [raw[picks, ti] for ti in time_idx]
+        t = np.array([d[1] for d in data])
+        data = np.array([d[0][:, 0] for d in data])
+        data = np.c_[t, data]
+    else:
+        if not isinstance(raw, string_types):
+            raise TypeError('raw must be an instance of Raw or string')
+        if not op.isfile(raw):
+            raise IOError('File "%s" does not exist' % raw)
+        if t_step is not None:
+            raise ValueError('t_step must be None if processing a log')
+        data = np.loadtxt(raw, skiprows=1)  # first line is header, skip it
+    return _quats_to_trans_rot_t(data)
+
+
+def _quats_to_trans_rot_t(quats):
+    """Convert Maxfilter-formatted head position quaternions
+
+    Parameters
+    ----------
+    quats : ndarray, shape (N, 10)
+        Maxfilter-formatted quaternions.
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    See Also
+    --------
+    _calculate_chpi_positions, get_chpi_positions
+    """
+    t = quats[:, 0].copy()
+    rotation = _quat_to_rot(quats[:, 1:4])
+    translation = quats[:, 4:7].copy()
+    return translation, rotation, t
+
+
+def _quat_to_rot(q):
+    """Helper to convert quaternions to rotations"""
+    # z = a + bi + cj + dk
+    b, c, d = q[..., 0], q[..., 1], q[..., 2]
+    bb, cc, dd = b * b, c * c, d * d
+    # use max() here to be safe in case roundoff errs put us over
+    aa = np.maximum(1. - bb - cc - dd, 0.)
+    a = np.sqrt(aa)
+    ab_2 = 2 * a * b
+    ac_2 = 2 * a * c
+    ad_2 = 2 * a * d
+    bc_2 = 2 * b * c
+    bd_2 = 2 * b * d
+    cd_2 = 2 * c * d
+    rotation = np.array([(aa + bb - cc - dd, bc_2 - ad_2, bd_2 + ac_2),
+                         (bc_2 + ad_2, aa + cc - bb - dd, cd_2 - ab_2),
+                         (bd_2 - ac_2, cd_2 + ab_2, aa + dd - bb - cc),
+                         ])
+    if q.ndim > 1:
+        rotation = np.rollaxis(np.rollaxis(rotation, 1, q.ndim + 1), 0, q.ndim)
+    return rotation
+
+
+def _rot_to_quat(rot):
+    """Here we derive qw from qx, qy, qz"""
+    qw_4 = np.sqrt(1 + rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2]) * 2
+    qx = (rot[..., 2, 1] - rot[..., 1, 2]) / qw_4
+    qy = (rot[..., 0, 2] - rot[..., 2, 0]) / qw_4
+    qz = (rot[..., 1, 0] - rot[..., 0, 1]) / qw_4
+    return np.rollaxis(np.array((qx, qy, qz)), 0, rot.ndim - 1)
+
+
+# ############################################################################
+# Estimate positions from data
+
+def _get_hpi_info(info):
+    """Helper to get HPI information from raw"""
+    if len(info['hpi_meas']) == 0 or \
+            ('coil_freq' not in info['hpi_meas'][0]['hpi_coils'][0]):
+        raise RuntimeError('Appropriate cHPI information not found in '
+                           'raw.info["hpi_meas"], cannot process cHPI')
+    hpi_result = info['hpi_results'][-1]
+    hpi_coils = info['hpi_meas'][-1]['hpi_coils']
+    hpi_num = np.array([h['number'] for h in hpi_coils])
+    pos_order = np.searchsorted(hpi_num, hpi_result['order'])
+    hpi_dig = [d for d in info['dig'] if d['kind'] == FIFF.FIFFV_POINT_HPI]
+    # this shouldn't happen, eventually we could add the transforms
+    # necessary to put it in head coords
+    if not all(d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig):
+        raise RuntimeError('cHPI coordinate frame incorrect')
+    hpi_rrs = np.array([d['r'] for d in hpi_dig])[pos_order]
+    hpi_freqs = np.array([float(x['coil_freq']) for x in hpi_coils])
+    # how cHPI activity is encoded in the FIF file
+    hpi_sub = info['hpi_subsystem']
+    hpi_pick = pick_channels(info['ch_names'], [hpi_sub['event_channel']])[0]
+    hpi_on = np.sum([coil['event_bits'][0] for coil in hpi_sub['hpi_coils']])
+    return hpi_freqs, hpi_rrs, hpi_pick, hpi_on, pos_order
+
+
+def _magnetic_dipole_objective(x, B, B2, w, coils):
+    """Project data onto right eigenvectors of whitened forward"""
+    fwd = np.dot(_magnetic_dipole_field_vec(x[np.newaxis, :], coils), w.T)
+    one = np.dot(linalg.svd(fwd, full_matrices=False)[2], B)
+    Bm2 = np.sum(one * one)
+    return B2 - Bm2
+
+
+def _fit_magnetic_dipole(B_orig, w, coils, x0):
+    """Fit a single bit of data (x0 = pos)"""
+    from scipy.optimize import fmin_cobyla
+    B = np.dot(w, B_orig)
+    B2 = np.dot(B, B)
+    objective = partial(_magnetic_dipole_objective, B=B, B2=B2,
+                        w=w, coils=coils)
+    x = fmin_cobyla(objective, x0, (), rhobeg=1e-2, rhoend=1e-4, disp=False)
+    return x, 1. - objective(x) / B2
+
+
+def _chpi_objective(x, est_pos_dev, hpi_head_rrs):
+    """Helper objective function"""
+    rot = _quat_to_rot(x[:3]).T
+    d = np.dot(est_pos_dev, rot) + x[3:] - hpi_head_rrs
+    return np.sum(d * d)
+
+
+def _fit_chpi_pos(est_pos_dev, hpi_head_rrs, x0):
+    """Fit rotation and translation parameters for cHPI coils"""
+    from scipy.optimize import fmin_cobyla
+    denom = np.sum((hpi_head_rrs - np.mean(hpi_head_rrs, axis=0)) ** 2)
+    objective = partial(_chpi_objective, est_pos_dev=est_pos_dev,
+                        hpi_head_rrs=hpi_head_rrs)
+    x = fmin_cobyla(objective, x0, (), rhobeg=1e-2, rhoend=1e-6, disp=False)
+    return x, 1. - objective(x) / denom
+
+
+def _angle_between_quats(x, y):
+    """Compute the angle between two quaternions w/3-element representations"""
+    # convert to complete quaternion representation
+    # use max() here to be safe in case roundoff errs put us over
+    x0 = np.sqrt(np.maximum(1. - x[..., 0] ** 2 -
+                            x[..., 1] ** 2 - x[..., 2] ** 2, 0.))
+    y0 = np.sqrt(np.maximum(1. - y[..., 0] ** 2 -
+                            y[..., 1] ** 2 - y[..., 2] ** 2, 0.))
+    # the difference z = x * conj(y), and theta = np.arccos(z0)
+    z0 = np.maximum(np.minimum(y0 * x0 + (x * y).sum(axis=-1), 1.), -1)
+    return 2 * np.arccos(z0)
+
+
+@verbose
+def _calculate_chpi_positions(raw, t_step_min=0.1, t_step_max=10.,
+                              t_window=0.2, dist_limit=0.005, gof_limit=0.98,
+                              verbose=None):
+    """Calculate head positions using cHPI coils
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data with cHPI information.
+    t_step_min : float
+        Minimum time step to use. If correlations are sufficiently high,
+        t_step_max will be used.
+    t_step_max : float
+        Maximum time step to use.
+    t_window : float
+        Time window to use to estimate the head positions.
+    dist_limit : float
+        Minimum distance (m) to accept for coil position fitting.
+    gof_limit : float
+        Minimum goodness of fit to accept.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The number of time points ``N`` will depend on the velocity of head
+    movements as well as ``t_step_max`` and ``t_step_min``.
+
+    See Also
+    --------
+    get_chpi_positions
+    """
+    from scipy.spatial.distance import cdist
+    if not (check_version('numpy', '1.7') and check_version('scipy', '0.11')):
+        raise RuntimeError('numpy>=1.7 and scipy>=0.11 required')
+    hpi_freqs, orig_head_rrs, hpi_pick, hpi_on, order = _get_hpi_info(raw.info)
+    sfreq, ch_names = raw.info['sfreq'], raw.info['ch_names']
+    # initial transforms
+    dev_head_t = raw.info['dev_head_t']['trans']
+    head_dev_t = invert_transform(raw.info['dev_head_t'])['trans']
+    # determine timing
+    n_window = int(round(t_window * sfreq))
+    fit_starts = np.round(np.arange(0, raw.last_samp / sfreq, t_step_min) *
+                          sfreq).astype(int)
+    fit_starts = fit_starts[fit_starts < raw.n_times - n_window]
+    fit_times = (fit_starts + (n_window + 1) // 2) / sfreq
+    n_freqs = len(hpi_freqs)
+    logger.info('HPIFIT: %s coils digitized in order %s'
+                % (n_freqs, ' '.join(str(o + 1) for o in order)))
+    logger.info('Coordinate transformation:')
+    for d in (dev_head_t[0, :3], dev_head_t[1, :3], dev_head_t[2, :3],
+              dev_head_t[:3, 3] * 1000.):
+        logger.info('{0:8.4f} {1:8.4f} {2:8.4f}'.format(*d))
+    logger.info('Using %s HPI coils: %s Hz'
+                % (n_freqs, ' '.join(str(int(s)) for s in hpi_freqs)))
+    # Set up amplitude fits
+    slope = np.arange(n_window).astype(np.float64)[:, np.newaxis]
+    f_t = 2 * np.pi * hpi_freqs[np.newaxis, :] * (slope / sfreq)
+    model = np.concatenate([np.sin(f_t), np.cos(f_t),
+                            slope, np.ones((n_window, 1))], axis=1)
+    inv_model = linalg.pinv(model)
+    del slope, f_t
+
+    # Set up magnetic dipole fits
+    picks = pick_types(raw.info, meg=True, eeg=False)
+    picks_chpi = np.concatenate([picks, [hpi_pick]])
+    logger.info('Found %s total and %s good MEG channels'
+                % (len(ch_names), len(picks)))
+    megchs = [ch for ci, ch in enumerate(raw.info['chs']) if ci in picks]
+    coils = _concatenate_coils(_create_meg_coils(megchs, 'normal'))
+
+    cov = make_ad_hoc_cov(raw.info, verbose=False)
+    whitener = _get_whitener_data(raw.info, cov, picks, verbose=False)
+    dev_head_quat = np.concatenate([_rot_to_quat(dev_head_t[:3, :3]),
+                                    dev_head_t[:3, 3]])
+    orig_dists = cdist(orig_head_rrs, orig_head_rrs)
+    last_quat = dev_head_quat.copy()
+    last_data_fit = None  # this indicates it's the first run
+    last_time = -t_step_min
+    last_head_rrs = orig_head_rrs.copy()
+    corr_limit = 0.98
+    quats = []
+    est_pos_dev = apply_trans(head_dev_t, orig_head_rrs)
+    for start, t in zip(fit_starts, fit_times):
+        #
+        # 1. Fit amplitudes for each channel from each of the N cHPI sinusoids
+        #
+        meg_chpi_data = raw[picks_chpi, start:start + n_window][0]
+        this_data = meg_chpi_data[:-1]
+        chpi_data = meg_chpi_data[-1]
+        if not (chpi_data == hpi_on).all():
+            logger.info('HPI not turned on (t=%7.3f)' % t)
+            continue
+        X = np.dot(inv_model, this_data.T)
+        data_diff = np.dot(model, X).T - this_data
+        data_diff *= data_diff
+        this_data *= this_data
+        g_chan = (1 - np.sqrt(data_diff.sum(axis=1) / this_data.sum(axis=1)))
+        g_sin = (1 - np.sqrt(data_diff.sum() / this_data.sum()))
+        del data_diff, this_data
+        X_sin, X_cos = X[:n_freqs], X[n_freqs:2 * n_freqs]
+        s_fit = np.sqrt(X_cos * X_cos + X_sin * X_sin)
+        if last_data_fit is None:  # first iteration
+            corr = 0.
+        else:
+            corr = np.corrcoef(s_fit.ravel(), last_data_fit.ravel())[0, 1]
+
+        # check to see if we need to continue
+        if t - last_time <= t_step_max - 1e-7 and corr > corr_limit and \
+                t != fit_times[-1]:
+            continue  # don't need to re-fit data
+        last_data_fit = s_fit.copy()  # save *before* inplace sign transform
+
+        # figure out principal direction of the vectors and align
+        # for s, c, fit in zip(X_sin, X_cos, s_fit):
+        #     fit *= np.sign(linalg.svd([s, c], full_matrices=False)[2][0])
+        s_fit *= np.sign(np.arctan2(X_sin, X_cos))
+
+        #
+        # 2. Fit magnetic dipole for each coil to obtain coil positions
+        #    in device coordinates
+        #
+        logger.info('HPI amplitude correlation %s: %s (%s channels > 0.95)'
+                    % (t, g_sin, (g_chan > 0.95).sum()))
+        outs = [_fit_magnetic_dipole(f, whitener, coils, pos)
+                for f, pos in zip(s_fit, est_pos_dev)]
+        est_pos_dev = np.array([o[0] for o in outs])
+        g_coils = [o[1] for o in outs]
+        these_dists = cdist(est_pos_dev, est_pos_dev)
+        these_dists = np.abs(orig_dists - these_dists)
+        # there is probably a better algorithm for finding the bad ones...
+        good = False
+        use_mask = np.ones(n_freqs, bool)
+        while not good:
+            d = (these_dists[use_mask][:, use_mask] <= dist_limit)
+            good = d.all()
+            if not good:
+                if use_mask.sum() == 2:
+                    use_mask[:] = False
+                    break  # failure
+                # exclude next worst point
+                badness = these_dists[use_mask][:, use_mask].sum(axis=0)
+                exclude = np.where(use_mask)[0][np.argmax(badness)]
+                use_mask[exclude] = False
+        good = use_mask.sum() >= 3
+        if not good:
+            logger.warning('    %s/%s acceptable hpi fits found, cannot '
+                           'determine the transformation! (t=%7.3f)'
+                           % (use_mask.sum(), n_freqs, t))
+            continue
+
+        #
+        # 3. Fit the head translation and rotation params (minimize error
+        #    between coil positions and the head coil digitization positions)
+        #
+        dev_head_quat, g = _fit_chpi_pos(est_pos_dev[use_mask],
+                                         orig_head_rrs[use_mask],
+                                         dev_head_quat)
+        if g < gof_limit:
+            logger.info('    Bad coil fit! (t=%7.3f)' % t)
+            continue
+        this_dev_head_t = np.concatenate((_quat_to_rot(dev_head_quat[:3]),
+                                          dev_head_quat[3:][:, np.newaxis]),
+                                         axis=1)
+        this_dev_head_t = np.concatenate((this_dev_head_t, [[0, 0, 0, 1.]]))
+        this_head_rrs = apply_trans(this_dev_head_t, est_pos_dev)
+        dt = t - last_time
+        vs = tuple(1000. * np.sqrt(np.sum((last_head_rrs -
+                                           this_head_rrs) ** 2, axis=1)) / dt)
+        logger.info('Hpi fit OK, movements [mm/s] = ' +
+                    ' / '.join(['%0.1f'] * n_freqs) % vs)
+        errs = [0] * n_freqs  # XXX eventually calculate this
+        e = 0.  # XXX eventually calculate this
+        d = 100 * np.sqrt(np.sum((last_quat[3:] -
+                                  dev_head_quat[3:]) ** 2))  # cm
+        r = _angle_between_quats(last_quat[:3], dev_head_quat[:3]) / dt
+        v = d / dt  # cm/sec
+        for ii in range(n_freqs):
+            if use_mask[ii]:
+                start, end = ' ', '/'
+            else:
+                start, end = '(', ')'
+            log_str = (start +
+                       '{0:6.1f} {1:6.1f} {2:6.1f} / ' +
+                       '{3:6.1f} {4:6.1f} {5:6.1f} / ' +
+                       'g = {6:0.3f} err = {7:4.1f} ' +
+                       end)
+            if ii <= 2:
+                log_str += '{8:6.3f} {9:6.3f} {10:6.3f}'
+            elif ii == 3:
+                log_str += '{8:6.1f} {9:6.1f} {10:6.1f}'
+            vals = np.concatenate((1000 * orig_head_rrs[ii],
+                                   1000 * this_head_rrs[ii],
+                                   [g_coils[ii], errs[ii]]))
+            if ii <= 2:
+                vals = np.concatenate((vals, this_dev_head_t[ii, :3]))
+            elif ii == 3:
+                vals = np.concatenate((vals, this_dev_head_t[:3, 3] * 1000.))
+            logger.debug(log_str.format(*vals))
+        logger.info('#t = %0.3f, #e = %0.2f cm, #g = %0.3f, #v = %0.2f cm/s, '
+                    '#r = %0.2f rad/s, #d = %0.2f cm' % (t, e, g, v, r, d))
+        quats.append(np.concatenate(([t], dev_head_quat, [g], [1. - g], [v])))
+        last_time = t
+        last_head_rrs = this_head_rrs.copy()
+    quats = np.array(quats)
+    quats = np.zeros((0, 10)) if quats.size == 0 else quats
+    return _quats_to_trans_rot_t(quats)
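
The quaternion helpers above use a 3-element (qx, qy, qz) representation,
recovering qw as sqrt(1 - qx**2 - qy**2 - qz**2). A round-trip sketch (these
are private functions, so the import path is internal API):

    import numpy as np
    from mne.chpi import _quat_to_rot, _rot_to_quat, _angle_between_quats

    q = np.array([0.1, -0.05, 0.2])     # any |q| <= 1 keeps qw real
    rot = _quat_to_rot(q)               # 3 x 3 rotation matrix
    q2 = _rot_to_quat(rot)              # back to (qx, qy, qz)
    print(np.allclose(q, q2))           # True, up to roundoff
    print(_angle_between_quats(q, q2))  # ~0 rad for identical rotations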
diff --git a/mne/commands/__init__.py b/mne/commands/__init__.py
index e69de29..eb018c3 100644
--- a/mne/commands/__init__.py
+++ b/mne/commands/__init__.py
@@ -0,0 +1 @@
+from . import utils
diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py
index b4ccb5c..409aabf 100755
--- a/mne/commands/mne_browse_raw.py
+++ b/mne/commands/mne_browse_raw.py
@@ -3,7 +3,9 @@
 
 You can do for example:
 
-$ mne browse_raw --raw sample_audvis_raw.fif --proj sample_audvis_ecg_proj.fif --eve sample_audvis_raw-eve.fif
+$ mne browse_raw --raw sample_audvis_raw.fif \
+                 --proj sample_audvis_ecg_proj.fif \
+                 --eve sample_audvis_raw-eve.fif
 """
 
 # Authors : Eric Larson, PhD
@@ -12,8 +14,7 @@ import sys
 import mne
 
 
-if __name__ == '__main__':
-
+def run():
     import matplotlib.pyplot as plt
 
     from mne.commands.utils import get_optparser
@@ -46,6 +47,21 @@ if __name__ == '__main__':
     parser.add_option("-s", "--show_options", dest="show_options",
                       help="Show projection options dialog",
                       default=False)
+    parser.add_option("--allowmaxshield", dest="maxshield",
+                      help="Allow loading MaxShield processed data",
+                      action="store_true")
+    parser.add_option("--highpass", dest="highpass", type="float",
+                      help="Display high-pass filter corner frequency",
+                      default=-1)
+    parser.add_option("--lowpass", dest="lowpass", type="float",
+                      help="Display low-pass filter corner frequency",
+                      default=-1)
+    parser.add_option("--filtorder", dest="filtorder", type="int",
+                      help="Display filtering IIR order",
+                      default=4)
+    parser.add_option("--clipping", dest="clipping",
+                      help="Enable trace clipping mode, either 'clip' or "
+                      "'transparent'", default=None)
 
     options, args = parser.parse_args()
 
@@ -58,12 +74,17 @@ if __name__ == '__main__':
     show_options = options.show_options
     proj_in = options.proj_in
     eve_in = options.eve_in
+    maxshield = options.maxshield
+    highpass = options.highpass
+    lowpass = options.lowpass
+    filtorder = options.filtorder
+    clipping = options.clipping
 
     if raw_in is None:
         parser.print_help()
         sys.exit(1)
 
-    raw = mne.io.Raw(raw_in, preload=preload)
+    raw = mne.io.Raw(raw_in, preload=preload, allow_maxshield=maxshield)
     if len(proj_in) > 0:
         projs = mne.read_proj(proj_in)
         raw.info['projs'] = projs
@@ -71,6 +92,16 @@ if __name__ == '__main__':
         events = mne.read_events(eve_in)
     else:
         events = None
-    fig = raw.plot(duration=duration, start=start, n_channels=n_channels,
-                   order=order, show_options=show_options, events=events)
+    highpass = None if highpass < 0 or filtorder <= 0 else highpass
+    lowpass = None if lowpass < 0 or filtorder <= 0 else lowpass
+    filtorder = 4 if filtorder <= 0 else filtorder
+    raw.plot(duration=duration, start=start, n_channels=n_channels,
+             order=order, show_options=show_options, events=events,
+             highpass=highpass, lowpass=lowpass, filtorder=filtorder,
+             clipping=clipping)
     plt.show(block=True)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
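
The same run()/is_main refactoring is applied to every command below: the
body of the old `if __name__ == '__main__':` block becomes run(), so a
dispatcher can import the module and call it. An illustrative sketch of such
a dispatcher (the actual bin/mne wrapper is not part of this hunk):

    import importlib
    import sys

    # e.g. "mne browse_raw --raw test_raw.fif" dispatches to
    # mne.commands.mne_browse_raw.run()
    name = 'mne_' + sys.argv.pop(1)
    module = importlib.import_module('mne.commands.' + name)
    module.run()  # each run() parses the remaining sys.argv via optparse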
diff --git a/mne/commands/mne_bti2fiff.py b/mne/commands/mne_bti2fiff.py
index 28f983f..98ccd05 100755
--- a/mne/commands/mne_bti2fiff.py
+++ b/mne/commands/mne_bti2fiff.py
@@ -29,34 +29,34 @@ import sys
 from mne.io import read_raw_bti
 
 
-if __name__ == '__main__':
-
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
 
     parser.add_option('-p', '--pdf', dest='pdf_fname',
-                    help='Input data file name', metavar='FILE')
+                      help='Input data file name', metavar='FILE')
     parser.add_option('-c', '--config', dest='config_fname',
-                    help='Input config file name', metavar='FILE', default='config')
+                      help='Input config file name', metavar='FILE',
+                      default='config')
     parser.add_option('--head_shape', dest='head_shape_fname',
-                    help='Headshape file name', metavar='FILE',
-                    default='hs_file')
+                      help='Headshape file name', metavar='FILE',
+                      default='hs_file')
     parser.add_option('-o', '--out_fname', dest='out_fname',
                       help='Name of the resulting fiff file',
                       default='as_data_fname')
     parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
-                    help='Compensatory rotation about Neuromag x axis, deg',
-                    default=2.0)
+                      help='Compensatory rotation about Neuromag x axis, deg',
+                      default=2.0)
     parser.add_option('-T', '--translation', dest='translation', type='str',
-                    help='Default translation, meter',
-                    default=(0.00, 0.02, 0.11))
+                      help='Default translation, meter',
+                      default=(0.00, 0.02, 0.11))
     parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
-                    help='4D ECG channel name',
-                    default='E31')
+                      help='4D ECG channel name',
+                      default='E31')
     parser.add_option('--eog_ch', dest='eog_ch', type='str',
-                    help='4D EOG channel names',
-                    default='E63,E64')
+                      help='4D EOG channel names',
+                      default='E63,E64')
 
     options, args = parser.parse_args()
 
@@ -83,4 +83,9 @@ if __name__ == '__main__':
 
     raw.save(out_fname)
     raw.close()
-    sys.exit(0)
+    if is_main:
+        sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_clean_eog_ecg.py b/mne/commands/mne_clean_eog_ecg.py
index 4e8f54a..3aa9397 100755
--- a/mne/commands/mne_clean_eog_ecg.py
+++ b/mne/commands/mne_clean_eog_ecg.py
@@ -8,7 +8,6 @@ from __future__ import print_function
 #           Alexandre Gramfort, Ph.D.
 
 
-import os
 import sys
 
 import mne
@@ -16,22 +15,23 @@ import mne
 
 def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
                   ecg_proj_fname=None, eog_proj_fname=None,
-                  ecg_event_fname=None, eog_event_fname=None, in_path='.'):
+                  ecg_event_fname=None, eog_event_fname=None, in_path='.',
+                  quiet=False):
     """Clean ECG from raw fif file
 
     Parameters
     ----------
-    in_fif_fname : string
+    in_fif_fname : str
         Raw fif file.
-    eog_event_fname : string
+    eog_event_fname : str
         Name of the EOG event file to write.
     eog : bool
         If True, remove EOG artifacts.
     ecg : bool
         If True, remove ECG artifacts.
-    ecg_event_fname : string
+    ecg_event_fname : str
         Name of the ECG event file to write.
-    in_path :
+    in_path : str
         Path where all the files are.
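+    quiet : bool
+        If True, suppress the output of the mne_process_raw calls.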
     """
     if not eog and not ecg:
@@ -48,9 +48,9 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
     if out_fif_fname is None:
         out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
     if ecg_proj_fname is None:
-        ecg_proj_fname = prefix + '_ecg_proj.fif'
+        ecg_proj_fname = prefix + '_ecg-proj.fif'
     if eog_proj_fname is None:
-        eog_proj_fname = prefix + '_eog_proj.fif'
+        eog_proj_fname = prefix + '_eog-proj.fif'
     if ecg_event_fname is None:
         ecg_event_fname = prefix + '_ecg-eve.fif'
     if eog_event_fname is None:
@@ -58,58 +58,41 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
 
     print('Implementing ECG and EOG artifact rejection on data')
 
+    # stdout/stderr=None let the mne_process_raw output through; the
+    # run_subprocess defaults capture (hide) it when quiet=True
+    kwargs = dict() if quiet else dict(stdout=None, stderr=None)
     if ecg:
-        ecg_events, _, _  = mne.preprocessing.find_ecg_events(raw_in)
+        ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw_in)
         print("Writing ECG events in %s" % ecg_event_fname)
         mne.write_events(ecg_event_fname, ecg_events)
-
         print('Computing ECG projector')
-
-        command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
-                   '--projtmin -0.08 --projtmax 0.08 --saveprojtag _ecg_proj '
-                   '--projnmag 2 --projngrad 1 --projevent 999 --highpass 5 '
-                   '--lowpass 35 --projmagrej 4000  --projgradrej 3000'
-                   % (in_path, in_fif_fname, ecg_event_fname))
-        st = os.system(command)
-
-        if st != 0:
-            print("Error while running : %s" % command)
-
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--events', ecg_event_fname, '--makeproj',
+                   '--projtmin', '-0.08', '--projtmax', '0.08',
+                   '--saveprojtag', '_ecg-proj', '--projnmag', '2',
+                   '--projngrad', '1', '--projevent', '999', '--highpass', '5',
+                   '--lowpass', '35', '--projmagrej', '4000',
+                   '--projgradrej', '3000')
+        mne.utils.run_subprocess(command, **kwargs)
     if eog:
         eog_events = mne.preprocessing.find_eog_events(raw_in)
         print("Writing EOG events in %s" % eog_event_fname)
         mne.write_events(eog_event_fname, eog_events)
-
         print('Computing EOG projector')
-
-        command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
-                   '--projtmin -0.15 --projtmax 0.15 --saveprojtag _eog_proj '
-                   '--projnmag 2 --projngrad 2 --projevent 998 --lowpass 35 '
-                   '--projmagrej 4000  --projgradrej 3000' % (in_path,
-                   in_fif_fname, eog_event_fname))
-
-        print('Running : %s' % command)
-
-        st = os.system(command)
-        if st != 0:
-            raise ValueError('Problem while running : %s' % command)
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--events', eog_event_fname, '--makeproj',
+                   '--projtmin', '-0.15', '--projtmax', '0.15',
+                   '--saveprojtag', '_eog-proj', '--projnmag', '2',
+                   '--projngrad', '2', '--projevent', '998', '--lowpass', '35',
+                   '--projmagrej', '4000', '--projgradrej', '3000')
+        mne.utils.run_subprocess(command, **kwargs)
 
     if out_fif_fname is not None:
         # Applying the ECG EOG projector
         print('Applying ECG EOG projector')
-
-        command = ('mne_process_raw --cd %s --raw %s '
-                   '--proj %s --projoff --save %s --filteroff'
-                   % (in_path, in_fif_fname, in_fif_fname, out_fif_fname))
-        command += ' --proj %s --proj %s' % (ecg_proj_fname, eog_proj_fname)
-
-        print('Command executed: %s' % command)
-
-        st = os.system(command)
-
-        if st != 0:
-            raise ValueError('Pb while running : %s' % command)
-
+        command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
+                   '--proj', in_fif_fname, '--projoff', '--save',
+                   out_fif_fname, '--filteroff',
+                   '--proj', ecg_proj_fname, '--proj', eog_proj_fname)
+        mne.utils.run_subprocess(command, **kwargs)
         print('Done removing artifacts.')
         print("Cleaned raw data saved in: %s" % out_fif_fname)
         print('IMPORTANT : Please eye-ball the data !!')
@@ -117,21 +100,22 @@ def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
         print('Projection not applied to raw data.')
 
 
-if __name__ == '__main__':
-
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
 
     parser.add_option("-i", "--in", dest="raw_in",
-                    help="Input raw FIF file", metavar="FILE")
+                      help="Input raw FIF file", metavar="FILE")
     parser.add_option("-o", "--out", dest="raw_out",
-                    help="Output raw FIF file", metavar="FILE",
-                    default=None)
+                      help="Output raw FIF file", metavar="FILE",
+                      default=None)
     parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
-                    help="Remove EOG", default=True)
+                      help="Remove EOG", default=True)
     parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
-                    help="Remove ECG", default=True)
+                      help="Remove ECG", default=True)
+    parser.add_option("-q", "--quiet", dest="quiet", action="store_true",
+                      help="Suppress mne_process_raw output", default=False)
 
     options, args = parser.parse_args()
 
@@ -143,5 +127,11 @@ if __name__ == '__main__':
     raw_out = options.raw_out
     eog = options.eog
     ecg = options.ecg
+    quiet = options.quiet
+
+    clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet)
+
 
-    clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg)
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
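
Replacing the os.system strings with argument tuples is the main behavioral
change above: mne.utils.run_subprocess hands the tuple to subprocess without
shell quoting and raises if the exit status is non-zero. A minimal sketch,
assuming any executable on the PATH:

    import mne

    stdout, stderr = mne.utils.run_subprocess(('echo', 'ok'))
    print(stdout)  # 'ok\n'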
diff --git a/mne/commands/mne_compare_fiff.py b/mne/commands/mne_compare_fiff.py
new file mode 100755
index 0000000..bc8a223
--- /dev/null
+++ b/mne/commands/mne_compare_fiff.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+"""Compare FIFF files
+
+You can do for example:
+
+$ mne compare_fiff test_raw.fif test_raw_sss.fif
+"""
+
+# Authors : Eric Larson, PhD
+
+import sys
+import mne
+
+
+def run():
+    parser = mne.commands.utils.get_optparser(
+        __file__, usage='mne compare_fiff <file_a> <file_b>')
+    options, args = parser.parse_args()
+    if len(args) != 2:
+        parser.print_help()
+        sys.exit(1)
+    mne.viz.compare_fiff(args[0], args[1])
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_compute_proj_ecg.py b/mne/commands/mne_compute_proj_ecg.py
index 55275b3..735a6db 100755
--- a/mne/commands/mne_compute_proj_ecg.py
+++ b/mne/commands/mne_compute_proj_ecg.py
@@ -3,7 +3,9 @@
 
 You can do for example:
 
-$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" --l-freq 1 --h-freq 100 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" \
+                       --l-freq 1 --h-freq 100 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
 """
 from __future__ import print_function
 
@@ -16,8 +18,7 @@ import sys
 import mne
 
 
-if __name__ == '__main__':
-
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -155,7 +156,8 @@ if __name__ == '__main__':
             raise ValueError('qrsthr must be "auto" or a float')
 
     if bad_fname is not None:
-        bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
+        with open(bad_fname, 'r') as fid:
+            bads = [w.rstrip() for w in fid.readlines()]
         print('Bad channels read : %s' % bads)
     else:
         bads = []
@@ -168,9 +170,9 @@ if __name__ == '__main__':
     ecg_event_fname = prefix + '_ecg-eve.fif'
 
     if average:
-        ecg_proj_fname = prefix + '_ecg_avg_proj.fif'
+        ecg_proj_fname = prefix + '_ecg_avg-proj.fif'
     else:
-        ecg_proj_fname = prefix + '_ecg_proj.fif'
+        ecg_proj_fname = prefix + '_ecg-proj.fif'
 
     raw = mne.io.Raw(raw_in, preload=preload)
 
@@ -205,3 +207,7 @@ if __name__ == '__main__':
 
     print("Writing ECG events in %s" % ecg_event_fname)
     mne.write_events(ecg_event_fname, events)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_compute_proj_eog.py b/mne/commands/mne_compute_proj_eog.py
index 96f31ca..e48740b 100755
--- a/mne/commands/mne_compute_proj_eog.py
+++ b/mne/commands/mne_compute_proj_eog.py
@@ -3,11 +3,16 @@
 
 You can do for example:
 
-$ mne compute_proj_eog -i sample_audvis_raw.fif --l-freq 1 --h-freq 35 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
+$ mne compute_proj_eog -i sample_audvis_raw.fif \
+                       --l-freq 1 --h-freq 35 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
 
 or
 
-$ mne compute_proj_eog -i sample_audvis_raw.fif --l-freq 1 --h-freq 35 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 --proj sample_audvis_ecg_proj.fif
+$ mne compute_proj_eog -i sample_audvis_raw.fif \
+                       --l-freq 1 --h-freq 35 \
+                       --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 \
+                       --proj sample_audvis_ecg-proj.fif
 
 to exclude ECG artifacts from projection computation.
 """
@@ -22,8 +27,7 @@ import sys
 import mne
 
 
-if __name__ == '__main__':
-
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -31,11 +35,9 @@ if __name__ == '__main__':
     parser.add_option("-i", "--in", dest="raw_in",
                       help="Input raw FIF file", metavar="FILE")
     parser.add_option("--tmin", dest="tmin", type="float",
-                      help="Time before event in seconds",
-                      default=-0.2)
+                      help="Time before event in seconds", default=-0.2)
     parser.add_option("--tmax", dest="tmax", type="float",
-                      help="Time after event in seconds",
-                      default=0.2)
+                      help="Time after event in seconds", default=0.2)
     parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
                       help="Number of SSP vectors for gradiometers",
                       default=2)
@@ -43,8 +45,7 @@ if __name__ == '__main__':
                       help="Number of SSP vectors for magnetometers",
                       default=2)
     parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
-                      help="Number of SSP vectors for EEG",
-                      default=2)
+                      help="Number of SSP vectors for EEG", default=2)
     parser.add_option("--l-freq", dest="l_freq", type="float",
                       help="Filter low cut-off frequency in Hz",
                       default=1)
@@ -52,14 +53,14 @@ if __name__ == '__main__':
                       help="Filter high cut-off frequency in Hz",
                       default=35)
     parser.add_option("--eog-l-freq", dest="eog_l_freq", type="float",
-                      help="Filter low cut-off frequency in Hz used for EOG event detection",
-                      default=1)
+                      help="Filter low cut-off frequency in Hz used for "
+                      "EOG event detection", default=1)
     parser.add_option("--eog-h-freq", dest="eog_h_freq", type="float",
-                      help="Filter high cut-off frequency in Hz used for EOG event detection",
-                      default=10)
+                      help="Filter high cut-off frequency in Hz used for "
+                      "EOG event detection", default=10)
     parser.add_option("-p", "--preload", dest="preload",
-                      help="Temporary file used during computation (to save memory)",
-                      default=True)
+                      help="Temporary file used during computation (to "
+                      "save memory)", default=True)
     parser.add_option("-a", "--average", dest="average", action="store_true",
                       help="Compute SSP after averaging",
                       default=False)
@@ -70,36 +71,36 @@ if __name__ == '__main__':
                       help="Number of taps to use for filtering",
                       default=2048)
     parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
-                      help="Number of jobs to run in parallel",
-                      default=1)
+                      help="Number of jobs to run in parallel", default=1)
     parser.add_option("--rej-grad", dest="rej_grad", type="float",
-                      help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)",
-                      default=2000)
+                      help="Gradiometers rejection parameter in fT/cm (peak "
+                      "to peak amplitude)", default=2000)
     parser.add_option("--rej-mag", dest="rej_mag", type="float",
-                      help="Magnetometers rejection parameter in fT (peak to peak amplitude)",
-                      default=3000)
+                      help="Magnetometers rejection parameter in fT (peak to "
+                      "peak amplitude)", default=3000)
     parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
-                      help="EEG rejection parameter in uV (peak to peak amplitude)",
-                      default=50)
+                      help="EEG rejection parameter in uV (peak to peak "
+                      "amplitude)", default=50)
     parser.add_option("--rej-eog", dest="rej_eog", type="float",
-                      help="EOG rejection parameter in uV (peak to peak amplitude)",
-                      default=1e9)
+                      help="EOG rejection parameter in uV (peak to peak "
+                      "amplitude)", default=1e9)
     parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
                       help="Add EEG average reference proj",
                       default=False)
     parser.add_option("--no-proj", dest="no_proj", action="store_true",
-                      help="Exclude the SSP projectors currently in the fiff file",
-                      default=False)
+                      help="Exclude the SSP projectors currently in the "
+                      "fiff file",  default=False)
     parser.add_option("--bad", dest="bad_fname",
-                      help="Text file containing bad channels list (one per line)",
-                      default=None)
+                      help="Text file containing bad channels list "
+                      "(one per line)", default=None)
     parser.add_option("--event-id", dest="event_id", type="int",
                       help="ID to use for events", default=998)
     parser.add_option("--event-raw", dest="raw_event_fname",
                       help="raw file to use for event detection", default=None)
     parser.add_option("--tstart", dest="tstart", type="float",
-                      help="Start artifact detection after tstart seconds", default=0.)
-    parser.add_option("-c","--channel", dest="ch_name", type="string",
+                      help="Start artifact detection after tstart seconds",
+                      default=0.)
+    parser.add_option("-c", "--channel", dest="ch_name", type="string",
                       help="Custom EOG channel(s), comma separated",
                       default=None)
 
@@ -138,7 +139,8 @@ if __name__ == '__main__':
     ch_name = options.ch_name
 
     if bad_fname is not None:
-        bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
+        with open(bad_fname, 'r') as fid:
+            bads = [w.rstrip() for w in fid.readlines()]
         print('Bad channels read : %s' % bads)
     else:
         bads = []
@@ -151,9 +153,9 @@ if __name__ == '__main__':
     eog_event_fname = prefix + '_eog-eve.fif'
 
     if average:
-        eog_proj_fname = prefix + '_eog_avg_proj.fif'
+        eog_proj_fname = prefix + '_eog_avg-proj.fif'
     else:
-        eog_proj_fname = prefix + '_eog_proj.fif'
+        eog_proj_fname = prefix + '_eog-proj.fif'
 
     raw = mne.io.Raw(raw_in, preload=preload)
 
@@ -163,14 +165,14 @@ if __name__ == '__main__':
         raw_event = raw
 
     flat = None  # XXX : not exposed to the user
-    projs, events = mne.preprocessing.compute_proj_eog(raw=raw,
-                    raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad,
-                    n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq,
-                    average=average, filter_length=filter_length,
-                    n_jobs=n_jobs, reject=reject, flat=flat, bads=bads,
-                    avg_ref=avg_ref, no_proj=no_proj, event_id=event_id,
-                    eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq, 
-                    tstart=tstart, ch_name=ch_name, copy=False)
+    projs, events = mne.preprocessing.compute_proj_eog(
+        raw=raw, raw_event=raw_event, tmin=tmin, tmax=tmax, n_grad=n_grad,
+        n_mag=n_mag, n_eeg=n_eeg, l_freq=l_freq, h_freq=h_freq,
+        average=average, filter_length=filter_length,
+        n_jobs=n_jobs, reject=reject, flat=flat, bads=bads,
+        avg_ref=avg_ref, no_proj=no_proj, event_id=event_id,
+        eog_l_freq=eog_l_freq, eog_h_freq=eog_h_freq,
+        tstart=tstart, ch_name=ch_name, copy=False)
 
     raw.close()
 
@@ -190,3 +192,7 @@ if __name__ == '__main__':
 
     print("Writing EOG events in %s" % eog_event_fname)
     mne.write_events(eog_event_fname, events)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
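
Both projector commands are thin wrappers around the preprocessing API. The
equivalent Python call with the CLI defaults written out (the reject values
are assumed to be the CLI defaults converted to SI units; file names follow
the usage examples above):

    import mne

    raw = mne.io.Raw('sample_audvis_raw.fif', preload=True)
    projs, events = mne.preprocessing.compute_proj_eog(
        raw, n_grad=2, n_mag=2, n_eeg=2, l_freq=1., h_freq=35.,
        average=False,
        reject=dict(grad=2000e-13, mag=3000e-15, eeg=50e-6, eog=1e9 * 1e-6))
    mne.write_proj('sample_audvis_eog-proj.fif', projs)
    mne.write_events('sample_audvis_eog-eve.fif', events)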
diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py
index 1adc15e..42b58d8 100644
--- a/mne/commands/mne_coreg.py
+++ b/mne/commands/mne_coreg.py
@@ -13,7 +13,7 @@ import sys
 import mne
 
 
-if __name__ == '__main__':
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -21,4 +21,9 @@ if __name__ == '__main__':
 
     os.environ['ETS_TOOLKIT'] = 'qt4'
     mne.gui.coregistration()
-    sys.exit(0)
+    if is_main:
+        sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_flash_bem.py b/mne/commands/mne_flash_bem.py
new file mode 100644
index 0000000..f46f0a2
--- /dev/null
+++ b/mne/commands/mne_flash_bem.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+"""Create 3-Layers BEM model from Flash MRI images
+
+This program assumes that FreeSurfer and MNE are installed and
+sourced properly.
+
+This function extracts the BEM surfaces (outer skull, inner skull, and
+outer skin) from multiecho FLASH MRI data with flip angles of 5 and 30
+degrees. The multiecho FLASH data are provided in DICOM format.
+This function assumes that the FreeSurfer segmentation of the subject
+has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+should be, as usual, in the subject's mri directory.
+
+Before running this script do the following:
+(unless the --noconvert option is specified)
+
+    1. Copy all of your FLASH images in a single directory <source> and
+       create a directory <dest> to hold the output of mne_organize_dicom
+    2. cd to <dest> and run
+       $ mne_organize_dicom <source>
+       to create an appropriate directory structure
+    3. Create symbolic links to make flash05 and flash30 point to the
+       appropriate series:
+       $ ln -s <FLASH 5 series dir> flash05
+       $ ln -s <FLASH 30 series dir> flash30
+    4. cd to the directory where flash05 and flash30 links are
+    5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately
+    6. Run this script
+
+Example usage:
+
+$ mne flash_bem --subject sample
+
+"""
+from __future__ import print_function
+
+# Authors: Lorenzo De Santis
+
+from mne.bem import convert_flash_mris, make_flash_bem
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name", default=None)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=None)
+    parser.add_option("-3", "--noflash30", dest="noflash30",
+                      action="store_true", default=False,
+                      help=("Skip the 30-degree flip angle data"),)
+    parser.add_option("-n", "--noconvert", dest="noconvert",
+                      action="store_true", default=False,
+                      help=("Assume that the Flash MRI images have already "
+                            "been converted to mgz files"))
+    parser.add_option("-u", "--unwarp", dest="unwarp",
+                      action="store_true", default=False,
+                      help=("Run grad_unwarp with -unwarp <type> option on "
+                            "each of the converted data sets"))
+    parser.add_option("-o", "--overwrite", dest="overwrite",
+                      action="store_true", default=False,
+                      help="Write over existing .surf files in bem folder")
+    parser.add_option("-v", "--view", dest="show", action="store_true",
+                      help="Show BEM model in 3D for visual inspection",
+                      default=False)
+
+    options, args = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    flash30 = not options.noflash30
+    convert = not options.noconvert
+    unwarp = options.unwarp
+    overwrite = options.overwrite
+    show = options.show
+
+    if options.subject is None:
+        parser.print_help()
+        raise RuntimeError('The subject argument must be set')
+
+    convert_flash_mris(subject=subject, subjects_dir=subjects_dir,
+                       flash30=flash30, convert=convert, unwarp=unwarp)
+    make_flash_bem(subject=subject, subjects_dir=subjects_dir,
+                   overwrite=overwrite, show=show)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
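
The same pipeline is available from Python; a sketch mirroring run() above
(assumes SUBJECTS_DIR is set and the FLASH series are organized as described
in the module docstring):

    from mne.bem import convert_flash_mris, make_flash_bem

    convert_flash_mris(subject='sample', subjects_dir=None,  # $SUBJECTS_DIR
                       flash30=True, convert=True, unwarp=False)
    make_flash_bem(subject='sample', subjects_dir=None,
                   overwrite=False, show=True)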
diff --git a/mne/commands/mne_flash_bem_model.py b/mne/commands/mne_flash_bem_model.py
index 595583b..2cd6580 100755
--- a/mne/commands/mne_flash_bem_model.py
+++ b/mne/commands/mne_flash_bem_model.py
@@ -18,12 +18,15 @@ from __future__ import print_function
 # Authors:  Rey Rene Ramirez, Ph.D.   e-mail: rrramir at uw.edu
 #           Alexandre Gramfort, Ph.D.
 
-
+import sys
 import math
 import os
+
 import mne
+from mne.utils import deprecated
 
 
+@deprecated("This function is deprecated, use mne_flash_bem instead")
 def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
     """Create 3-Layers BEM model from Flash MRI images
 
@@ -74,10 +77,10 @@ def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
         os.mkdir("parameter_maps")
     print("--- Converting Flash 5")
     os.system('mri_convert -flip_angle %s -tr 25 %s mef05.mgz' %
-                                            (5 * math.pi / 180, flash05))
+              (5 * math.pi / 180, flash05))
     print("--- Converting Flash 30")
     os.system('mri_convert -flip_angle %s -tr 25 %s mef30.mgz' %
-                                            (30 * math.pi / 180, flash30))
+              (30 * math.pi / 180, flash30))
     print("--- Running mne_flash_bem")
     os.system('mne_flash_bem --noconvert')
     os.chdir(os.path.join(subjects_dir, subject, 'bem'))
@@ -92,16 +95,16 @@ def make_flash_bem(subject, subjects_dir, flash05, flash30, show=False):
         skull_col = (0.91, 0.89, 0.67)
         brain_col = (0.67, 0.89, 0.91)  # light blue
         colors = [head_col, skull_col, brain_col]
-        from enthought.mayavi import mlab
+        from mayavi import mlab
         mlab.clf()
         for fname, c in zip(fnames, colors):
             points, faces = mne.read_surface(fname)
-            mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
-                                 color=c, opacity=0.3)
+            mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                                 faces, color=c, opacity=0.3)
         mlab.show()
 
-if __name__ == '__main__':
 
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -110,21 +113,25 @@ if __name__ == '__main__':
     subjects_dir = os.environ.get('SUBJECTS_DIR')
 
     parser.add_option("-s", "--subject", dest="subject",
-                    help="Subject name", default=subject)
+                      help="Subject name", default=subject)
     parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
-                    help="Subjects directory", default=subjects_dir)
+                      help="Subjects directory", default=subjects_dir)
     parser.add_option("-5", "--flash05", dest="flash05",
-                    help=("Path to FLASH sequence with a spin angle of 5 "
-                          "degrees in Nifti format"), metavar="FILE")
+                      help=("Path to FLASH sequence with a spin angle of 5 "
+                            "degrees in Nifti format"), metavar="FILE")
     parser.add_option("-3", "--flash30", dest="flash30",
-                    help=("Path to FLASH sequence with a spin angle of 30 "
-                          "degrees in Nifti format"), metavar="FILE")
+                      help=("Path to FLASH sequence with a spin angle of 30 "
+                            "degrees in Nifti format"), metavar="FILE")
     parser.add_option("-v", "--view", dest="show", action="store_true",
                       help="Show BEM model in 3D for visual inspection",
                       default=False)
 
     options, args = parser.parse_args()
 
+    if options.flash05 is None or options.flash30 is None:
+        parser.print_help()
+        sys.exit(1)
+
     subject = options.subject
     subjects_dir = options.subjects_dir
     flash05 = os.path.abspath(options.flash05)
@@ -132,3 +139,7 @@ if __name__ == '__main__':
     show = options.show
 
     make_flash_bem(subject, subjects_dir, flash05, flash30, show=show)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_freeview_bem_surfaces.py b/mne/commands/mne_freeview_bem_surfaces.py
new file mode 100644
index 0000000..16607e8
--- /dev/null
+++ b/mne/commands/mne_freeview_bem_surfaces.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+"""View the 3-Layers BEM model using Freeview
+
+"""
+from __future__ import print_function
+
+# Authors:  Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+
+import sys
+import os
+import os.path as op
+
+from mne.utils import run_subprocess, get_subjects_dir
+
+
+def freeview_bem_surfaces(subject, subjects_dir, method):
+    """View 3-Layers BEM model with Freeview
+
+    Parameters
+    ----------
+    subject : string
+        Subject name.
+    subjects_dir : string
+        Directory containing subjects data (FreeSurfer SUBJECTS_DIR).
+    method : string
+        Can be 'flash' or 'watershed'.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+
+    env = os.environ.copy()
+    env['SUBJECT'] = subject
+    env['SUBJECTS_DIR'] = subjects_dir
+
+    if 'FREESURFER_HOME' not in env:
+        raise RuntimeError('The FreeSurfer environment needs to be set up.')
+
+    mri_dir = op.join(subjects_dir, subject, 'mri')
+    bem_dir = op.join(subjects_dir, subject, 'bem')
+    mri = op.join(mri_dir, 'T1.mgz')
+
+    if method == 'watershed':
+        bem_dir = op.join(bem_dir, 'watershed')
+        outer_skin = op.join(bem_dir, '%s_outer_skin_surface' % subject)
+        outer_skull = op.join(bem_dir, '%s_outer_skull_surface' % subject)
+        inner_skull = op.join(bem_dir, '%s_inner_skull_surface' % subject)
+    else:
+        if method == 'flash':
+            bem_dir = op.join(bem_dir, 'flash')
+        outer_skin = op.join(bem_dir, 'outer_skin.surf')
+        outer_skull = op.join(bem_dir, 'outer_skull.surf')
+        inner_skull = op.join(bem_dir, 'inner_skull.surf')
+
+    # put together the command
+    cmd = ['freeview']
+    cmd += ["--volume", mri]
+    cmd += ["--surface", "%s:color=red:edgecolor=red" % inner_skull]
+    cmd += ["--surface", "%s:color=yellow:edgecolor=yellow" % outer_skull]
+    cmd += ["--surface",
+            "%s:color=255,170,127:edgecolor=255,170,127" % outer_skin]
+
+    run_subprocess(cmd, env=env, stdout=sys.stdout)
+    print("[done]")
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    subject = os.environ.get('SUBJECT')
+    subjects_dir = get_subjects_dir()
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name", default=subject)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=subjects_dir)
+    parser.add_option("-m", "--method", dest="method",
+                      help=("Method used to generate the BEM model. "
+                            "Can be flash or watershed."), metavar="FILE")
+
+    options, args = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    method = options.method
+
+    freeview_bem_surfaces(subject, subjects_dir, method)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
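
The helper defined above can also be called from Python directly; a
minimal sketch, assuming a configured FreeSurfer environment and flash
BEM surfaces for a hypothetical 'sample' subject:

    from mne.commands.mne_freeview_bem_surfaces import freeview_bem_surfaces

    # Launches freeview with T1.mgz plus the inner skull (red), outer
    # skull (yellow), and outer skin surfaces read from bem/flash.
    freeview_bem_surfaces('sample', '/path/to/subjects', method='flash')
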
diff --git a/mne/commands/mne_kit2fiff.py b/mne/commands/mne_kit2fiff.py
index 68197ff..c013deb 100755
--- a/mne/commands/mne_kit2fiff.py
+++ b/mne/commands/mne_kit2fiff.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Authors: Teon Brooks  <teon at nyu.edu>
+# Authors: Teon Brooks  <teon.brooks at gmail.com>
 
 """ Import KIT / NYU data to fif file.
 
@@ -14,8 +14,8 @@ import sys
 import mne
 from mne.io import read_raw_kit
 
-if __name__ == '__main__':
 
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -57,7 +57,7 @@ if __name__ == '__main__':
     out_fname = options.out_fname
 
     if isinstance(stim, str):
-        stim = stim.split(':')
+        stim = map(int, stim.split(':'))
 
     raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname,
                        hsp=hsp_fname, stim=stim, slope=slope,
@@ -66,3 +66,7 @@ if __name__ == '__main__':
     raw.save(out_fname)
     raw.close()
     sys.exit(0)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
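
The equivalent direct call to read_raw_kit, with hypothetical file
names; note that the stim channel codes arrive on the command line as a
colon-separated string and are mapped to integers as above:

    from mne.io import read_raw_kit

    # '161:162:163' on the command line becomes [161, 162, 163] here.
    raw = read_raw_kit(input_fname='data.sqd', mrk='marker.mrk',
                       elp='points.elp', hsp='headshape.hsp',
                       stim=[161, 162, 163])
    raw.save('data_raw.fif')
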
diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py
index a1a18ff..af1bae7 100755
--- a/mne/commands/mne_make_scalp_surfaces.py
+++ b/mne/commands/mne_make_scalp_surfaces.py
@@ -14,14 +14,25 @@ example usage: mne make_scalp_surfaces --overwrite --subject sample
 from __future__ import print_function
 
 import os
+import copy
 import os.path as op
 import sys
 import mne
+from mne.utils import run_subprocess, _TempDir, verbose, logger
 
-if __name__ == '__main__':
+
+def _check_file(fname, overwrite):
+    """Helper to prevent overwrites"""
+    if op.isfile(fname) and not overwrite:
+        raise IOError('File %s exists, use --overwrite to overwrite it'
+                      % fname)
+
+
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
+    subjects_dir = mne.get_config('SUBJECTS_DIR')
 
     parser.add_option('-o', '--overwrite', dest='overwrite',
                       action='store_true',
@@ -32,56 +43,53 @@ if __name__ == '__main__':
                       help='Force transformation of surface into bem.')
     parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                       help='Print the debug messages.')
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=subjects_dir)
 
     options, args = parser.parse_args()
 
-    env = os.environ
-    subject = vars(options).get('subject', env.get('SUBJECT'))
-    if subject is None:
+    subject = vars(options).get('subject', os.getenv('SUBJECT'))
+    subjects_dir = options.subjects_dir
+    if subject is None or subjects_dir is None:
         parser.print_help()
         sys.exit(1)
+    _run(subjects_dir, subject, options.force, options.overwrite,
+         options.verbose)
 
-    overwrite = options.overwrite
-    verbose = options.verbose
-    force = '--force' if options.force else '--check'
-
-    from mne.commands.utils import get_status_output
-    def my_run_cmd(cmd, err_msg):
-        sig, out, error = get_status_output(cmd)
-        if verbose:
-            print(out, error)
-        if sig != 0:
-            print(err_msg)
-            sys.exit(1)
-
-    if not 'SUBJECTS_DIR' in env:
-        print('The environment variable SUBJECTS_DIR should be set')
-        sys.exit(1)
 
-    if not op.isabs(env['SUBJECTS_DIR']):
-        env['SUBJECTS_DIR'] = op.abspath(env['SUBJECTS_DIR'])
-    subj_dir = env['SUBJECTS_DIR']
+@verbose
+def _run(subjects_dir, subject, force, overwrite, verbose=None):
+    this_env = copy.copy(os.environ)
+    this_env['SUBJECTS_DIR'] = subjects_dir
+    this_env['SUBJECT'] = subject
 
-    if not 'MNE_ROOT' in env:
-        print('MNE_ROOT environment variable is not set')
-        sys.exit(1)
+    if 'SUBJECTS_DIR' not in this_env:
+        raise RuntimeError('The environment variable SUBJECTS_DIR should '
+                           'be set')
 
-    if not 'FREESURFER_HOME' in env:
-        print('The FreeSurfer environment needs to be set up for this script')
-        sys.exit(1)
+    if not op.isdir(subjects_dir):
+        raise RuntimeError('subjects directory %s not found, specify using '
+                           'the environment variable SUBJECTS_DIR or '
+                           'the command line option --subjects-dir'
+                           % subjects_dir)
 
-    subj_path = op.join(subj_dir, subject)
+    if 'MNE_ROOT' not in this_env:
+        raise RuntimeError('MNE_ROOT environment variable is not set')
+
+    if 'FREESURFER_HOME' not in this_env:
+        raise RuntimeError('The FreeSurfer environment needs to be set up '
+                           'for this script')
+    force = '--force' if force else '--check'
+    subj_path = op.join(subjects_dir, subject)
     if not op.exists(subj_path):
-        print(('%s does not exits. Please check your subject directory '
-               'path.' % subj_path))
-        sys.exit(1)
+        raise RuntimeError('%s does not exist. Please check your subject '
+                           'directory path.' % subj_path)
 
     if op.exists(op.join(subj_path, 'mri', 'T1.mgz')):
         mri = 'T1.mgz'
     else:
         mri = 'T1'
 
-    print('1. Creating a dense scalp tessellation with mkheadsurf...')
+    logger.info('1. Creating a dense scalp tessellation with mkheadsurf...')
 
     def check_seghead(surf_path=op.join(subj_path, 'surf')):
         for k in ['/lh.seghead', '/lh.smseghead']:
@@ -92,40 +100,45 @@ if __name__ == '__main__':
 
     my_seghead = check_seghead()
     if my_seghead is None:
-        cmd = 'mkheadsurf -subjid %s -srcvol %s >/dev/null' % (subject, mri)
-        my_run_cmd(cmd, 'mkheadsurf failed')
-    else:
-        print('%s/surf/%s already there' % (subj_path, my_seghead))
-        if not overwrite:
-            print('Use the --overwrite option to replace exisiting surfaces.')
-            sys.exit()
+        run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri],
+                       env=this_env)
 
     surf = check_seghead()
     if surf is None:
-        print('mkheadsurf did not produce the standard output file.')
-        sys.exit(1)
-
-    fif = '{0}/{1}/bem/{1}-head-dense.fif'.format(subj_dir, subject)
-    print('2. Creating %s ...' % fif)
-    cmd = 'mne_surf2bem --surf %s --id 4 %s --fif %s' % (surf, force, fif)
-    my_run_cmd(cmd, 'Failed to create %s, see above' % fif)
+        raise RuntimeError('mkheadsurf did not produce the standard output '
+                           'file.')
+
+    dense_fname = '{0}/{1}/bem/{1}-head-dense.fif'.format(subjects_dir,
+                                                          subject)
+    logger.info('2. Creating %s ...' % dense_fname)
+    _check_file(dense_fname, overwrite)
+    run_subprocess(['mne_surf2bem', '--surf', surf, '--id', '4', force,
+                    '--fif', dense_fname], env=this_env)
     levels = 'medium', 'sparse'
-    for ii, (n_tri, level) in enumerate(zip([30000, 2500], levels), 3):
-        my_surf = mne.read_bem_surfaces(fif)[0]
-        print('%i. Creating medium grade tessellation...' % ii)
-        print('%i.1 Decimating the dense tessellation...' % ii)
+    my_surf = mne.read_bem_surfaces(dense_fname)[0]
+    tris = [30000, 2500]
+    if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true':
+        tris = [len(my_surf['tris'])]  # don't actually decimate
+    for ii, (n_tri, level) in enumerate(zip(tris, levels), 3):
+        logger.info('%i. Creating %s tessellation...' % (ii, level))
+        logger.info('%i.1 Decimating the dense tessellation...' % ii)
         points, tris = mne.decimate_surface(points=my_surf['rr'],
                                             triangles=my_surf['tris'],
                                             n_triangles=n_tri)
-        out_fif = fif.replace('dense', level)
-        print('%i.2 Creating %s' % (ii, out_fif))
-        surf_fname = '/tmp/tmp-surf.surf'
+        other_fname = dense_fname.replace('dense', level)
+        logger.info('%i.2 Creating %s' % (ii, other_fname))
+        _check_file(other_fname, overwrite)
+        tempdir = _TempDir()
+        surf_fname = tempdir + '/tmp-surf.surf'
         # convert points to meters, make mne_analyze happy
         mne.write_surface(surf_fname, points * 1e3, tris)
         # XXX for some reason --check does not work here.
-        cmd = 'mne_surf2bem --surf %s --id 4 --force --fif %s'
-        cmd %= (surf_fname, out_fif)
-        my_run_cmd(cmd, 'Failed to create %s, see above' % out_fif)
-        os.remove(surf_fname)
-
-    sys.exit(0)
+        try:
+            run_subprocess(['mne_surf2bem', '--surf', surf_fname, '--id', '4',
+                            '--force', '--fif', other_fname], env=this_env)
+        finally:
+            del tempdir
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
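
The decimation step in _run above amounts to the following; a minimal
sketch with a hypothetical dense-surface file name (mne.decimate_surface
requires tvtk to be installed):

    import mne

    # Load the dense head surface and reduce it to ~30000 triangles,
    # i.e. the "medium" grade produced by the command above.
    surf = mne.read_bem_surfaces('sample-head-dense.fif')[0]
    points, tris = mne.decimate_surface(points=surf['rr'],
                                        triangles=surf['tris'],
                                        n_triangles=30000)
    # mne_surf2bem expects FreeSurfer surfaces in mm, hence the scaling.
    mne.write_surface('tmp-surf.surf', points * 1e3, tris)
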
diff --git a/mne/commands/mne_maxfilter.py b/mne/commands/mne_maxfilter.py
index e727c7c..dd5607c 100755
--- a/mne/commands/mne_maxfilter.py
+++ b/mne/commands/mne_maxfilter.py
@@ -17,77 +17,76 @@ import os
 import mne
 
 
-if __name__ == '__main__':
-
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
 
     parser.add_option("-i", "--in", dest="in_fname",
-                    help="Input raw FIF file", metavar="FILE")
+                      help="Input raw FIF file", metavar="FILE")
     parser.add_option("-o", dest="out_fname",
-                    help="Output FIF file (if not set, suffix  '_sss' will be used)",
-                    metavar="FILE", default=None)
+                      help="Output FIF file (if not set, suffix  '_sss' will "
+                      "be used)", metavar="FILE", default=None)
     parser.add_option("--origin", dest="origin",
-                    help="Head origin in mm, or a filename to read the origin from. "
-                    "If not set it will be estimated from headshape points",
-                    default=None)
+                      help="Head origin in mm, or a filename to read the "
+                      "origin from. If not set it will be estimated from "
+                      "headshape points", default=None)
     parser.add_option("--origin-out", dest="origin_out",
-                    help="Filename to use for computed origin", default=None)
+                      help="Filename to use for computed origin", default=None)
     parser.add_option("--frame", dest="frame", type="string",
-                    help="Coordinate frame for head center ('device' or 'head')",
-                    default="device")
+                      help="Coordinate frame for head center ('device' or "
+                      "'head')", default="device")
     parser.add_option("--bad", dest="bad", type="string",
-                    help="List of static bad channels",
-                    default=None)
+                      help="List of static bad channels",
+                      default=None)
     parser.add_option("--autobad", dest="autobad", type="string",
-                    help="Set automated bad channel detection ('on', 'off', 'n')",
-                    default="off")
+                      help="Set automated bad channel detection ('on', 'off', "
+                      "'n')", default="off")
     parser.add_option("--skip", dest="skip",
-                    help="Skips raw data sequences, time intervals pairs in sec, e.g.: 0 30 120 150",
-                    default=None)
+                      help="Skips raw data sequences, time intervals pairs in "
+                      "sec, e.g.: 0 30 120 150", default=None)
     parser.add_option("--force", dest="force", action="store_true",
-                    help="Ignore program warnings",
-                    default=False)
+                      help="Ignore program warnings",
+                      default=False)
     parser.add_option("--st", dest="st", action="store_true",
-                    help="Apply the time-domain MaxST extension",
-                    default=False)
+                      help="Apply the time-domain MaxST extension",
+                      default=False)
     parser.add_option("--buflen", dest="st_buflen", type="float",
-                    help="MaxSt buffer length in sec",
-                    default=16.0)
+                      help="MaxSt buffer length in sec",
+                      default=16.0)
     parser.add_option("--corr", dest="st_corr", type="float",
-                    help="MaxSt subspace correlation",
-                    default=0.96)
+                      help="MaxSt subspace correlation",
+                      default=0.96)
     parser.add_option("--trans", dest="mv_trans",
-                    help="Transforms the data into the coil definitions of in_fname, or into the default frame",
-                    default=None)
+                      help="Transforms the data into the coil definitions of "
+                      "in_fname, or into the default frame", default=None)
     parser.add_option("--movecomp", dest="mv_comp", action="store_true",
-                    help="Estimates and compensates head movements in continuous raw data",
-                    default=False)
+                      help="Estimates and compensates head movements in "
+                      "continuous raw data", default=False)
     parser.add_option("--headpos", dest="mv_headpos", action="store_true",
-                    help="Estimates and stores head position parameters, but does not compensate movements",
-                    default=False)
+                      help="Estimates and stores head position parameters, "
+                      "but does not compensate movements", default=False)
     parser.add_option("--hp", dest="mv_hp", type="string",
-                    help="Stores head position data in an ascii file",
-                    default=None)
+                      help="Stores head position data in an ascii file",
+                      default=None)
     parser.add_option("--hpistep", dest="mv_hpistep", type="float",
-                    help="Sets head position update interval in ms",
-                    default=None)
+                      help="Sets head position update interval in ms",
+                      default=None)
     parser.add_option("--hpisubt", dest="mv_hpisubt", type="string",
-                    help="Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off",
-                    default=None)
+                      help="Subtracts hpi signals: sine amplitudes, amp + "
+                      "baseline, or switch off", default=None)
     parser.add_option("--nohpicons", dest="mv_hpicons", action="store_false",
-                    help="Do not check initial consistency isotrak vs hpifit",
-                    default=True)
+                      help="Do not check initial consistency isotrak vs "
+                      "hpifit", default=True)
     parser.add_option("--linefreq", dest="linefreq", type="float",
-                    help="Sets the basic line interference frequency (50 or 60 Hz)",
-                    default=None)
+                      help="Sets the basic line interference frequency (50 or "
+                      "60 Hz)", default=None)
     parser.add_option("--nooverwrite", dest="overwrite", action="store_false",
-                    help="Do not overwrite output file if it already exists",
-                    default=True)
+                      help="Do not overwrite output file if it already exists",
+                      default=True)
     parser.add_option("--args", dest="mx_args", type="string",
-                    help="Additional command line arguments to pass to MaxFilter",
-                    default="")
+                      help="Additional command line arguments to pass to "
+                      "MaxFilter", default="")
 
     options, args = parser.parse_args()
 
@@ -131,14 +130,19 @@ if __name__ == '__main__':
             out_fname = prefix + '_sss.fif'
 
     if origin is not None and os.path.exists(origin):
-        origin = open(origin, 'r').readlines()[0].strip()
+        with open(origin, 'r') as fid:
+            origin = fid.readlines()[0].strip()
 
-    origin = mne.preprocessing.apply_maxfilter(in_fname, out_fname, origin, frame,
-                    bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans,
-                    mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons,
-                    linefreq, mx_args, overwrite)
+    origin = mne.preprocessing.apply_maxfilter(
+        in_fname, out_fname, origin, frame,
+        bad, autobad, skip, force, st, st_buflen, st_corr, mv_trans,
+        mv_comp, mv_headpos, mv_hp, mv_hpistep, mv_hpisubt, mv_hpicons,
+        linefreq, mx_args, overwrite)
 
     if origin_out is not None:
-        fid = open(origin_out, 'w')
-        fid.write(origin + '\n')
-        fid.close()
+        with open(origin_out, 'w') as fid:
+            fid.write(origin + '\n')
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
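
A minimal sketch of the underlying library call with only a few of the
options shown; the file names are hypothetical, the Elekta MaxFilter
binary must be available, and origin=None (the default) means the head
origin is estimated from the headshape points:

    import mne

    # Apply MaxFilter with the time-domain MaxST extension enabled.
    origin = mne.preprocessing.apply_maxfilter(
        'raw.fif', 'raw_sss.fif', st=True, st_buflen=16.0, st_corr=0.96)
    print('Estimated head origin: %s' % origin)
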
diff --git a/mne/commands/mne_report.py b/mne/commands/mne_report.py
index e646b64..417730e 100644
--- a/mne/commands/mne_report.py
+++ b/mne/commands/mne_report.py
@@ -9,11 +9,19 @@ MNE-sample-data/MEG/sample/sample_audvis-ave.fif -d MNE-sample-data/subjects/ \
 
 """
 
+import sys
+import time
+
 from mne.report import Report
+from mne.utils import verbose, logger
+
 
+@verbose
+def log_elapsed(t, verbose=None):
+    logger.info('Report complete in %s seconds' % round(t, 1))
 
-if __name__ == '__main__':
 
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
@@ -23,6 +31,15 @@ if __name__ == '__main__':
     parser.add_option("-i", "--info", dest="info_fname",
                       help="File from which info dictionary is to be read",
                       metavar="FILE")
+    parser.add_option("-c", "--cov", dest="cov_fname",
+                      help="File from which noise covariance is to be read",
+                      metavar="FILE")
+    parser.add_option("--bmin", dest="bmin",
+                      help="Time at which baseline correction starts for "
+                      "evokeds", default=None)
+    parser.add_option("--bmax", dest="bmax",
+                      help="Time at which baseline correction stops for "
+                      "evokeds", default=None)
     parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
                       help="The subjects directory")
     parser.add_option("-s", "--subject", dest="subject",
@@ -35,18 +52,42 @@ if __name__ == '__main__':
                       help="Overwrite html report if it already exists")
     parser.add_option("-j", "--jobs", dest="n_jobs", help="Number of jobs to"
                       " run in parallel")
+    parser.add_option("-m", "--mri-decim", type="int", dest="mri_decim",
+                      default=2, help="Integer factor used to decimate "
+                      "BEM plots")
 
     options, args = parser.parse_args()
     path = options.path
+    if path is None:
+        parser.print_help()
+        sys.exit(1)
     info_fname = options.info_fname
+    cov_fname = options.cov_fname
     subjects_dir = options.subjects_dir
     subject = options.subject
+    mri_decim = int(options.mri_decim)
     verbose = True if options.verbose is not None else False
     open_browser = False if options.no_browser is not None else True
     overwrite = True if options.overwrite is not None else False
     n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1
 
-    report = Report(info_fname, subjects_dir=subjects_dir, subject=subject,
-                    verbose=verbose)
-    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs)
+    bmin = float(options.bmin) if options.bmin is not None else None
+    bmax = float(options.bmax) if options.bmax is not None else None
+    # XXX: this means (None, None) cannot be specified through command line
+    if bmin is None and bmax is None:
+        baseline = None
+    else:
+        baseline = (bmin, bmax)
+
+    t0 = time.time()
+    report = Report(info_fname, subjects_dir=subjects_dir,
+                    subject=subject, baseline=baseline,
+                    cov_fname=cov_fname, verbose=verbose)
+    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs,
+                        mri_decim=mri_decim)
+    log_elapsed(time.time() - t0, verbose=verbose)
     report.save(open_browser=open_browser, overwrite=overwrite)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
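
Putting the new options together, a minimal sketch of what the command
does; paths, baseline values, and file names are hypothetical:

    from mne.report import Report

    report = Report(info_fname='sample_audvis-ave.fif',
                    subjects_dir='/path/to/subjects', subject='sample',
                    baseline=(None, 0), cov_fname='sample-cov.fif',
                    verbose=True)
    report.parse_folder('/path/to/data', n_jobs=1, mri_decim=2)
    report.save(open_browser=False, overwrite=True)
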
diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py
index 07e447b..dd822b0 100755
--- a/mne/commands/mne_surf2bem.py
+++ b/mne/commands/mne_surf2bem.py
@@ -16,18 +16,18 @@ import sys
 
 import mne
 
-if __name__ == '__main__':
 
+def run():
     from mne.commands.utils import get_optparser
 
     parser = get_optparser(__file__)
 
     parser.add_option("-s", "--surf", dest="surf",
-                    help="Surface in Freesurfer format", metavar="FILE")
+                      help="Surface in Freesurfer format", metavar="FILE")
     parser.add_option("-f", "--fif", dest="fif",
-                    help="FIF file produced", metavar="FILE")
+                      help="FIF file produced", metavar="FILE")
     parser.add_option("-i", "--id", dest="id", default=4,
-                    help=("Surface Id (e.g. 4 sur head surface)"))
+                      help=("Surface Id (e.g. 4 sur head surface)"))
 
     options, args = parser.parse_args()
 
@@ -36,9 +36,13 @@ if __name__ == '__main__':
         sys.exit(1)
 
     print("Converting %s to BEM FIF file." % options.surf)
-
     points, tris = mne.read_surface(options.surf)
     points *= 1e-3
     surf = dict(coord_frame=5, id=int(options.id), nn=None, np=len(points),
                 ntri=len(tris), rr=points, sigma=1, tris=tris)
     mne.write_bem_surface(options.fif, surf)
+
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
diff --git a/mne/commands/mne_watershed_bem.py b/mne/commands/mne_watershed_bem.py
new file mode 100644
index 0000000..8efe423
--- /dev/null
+++ b/mne/commands/mne_watershed_bem.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Authors: Lorenzo De Santis
+"""
+
+    Create BEM surfaces using the watershed algorithm included with
+        FreeSurfer
+
+"""
+
+from __future__ import print_function
+import sys
+
+from mne.bem import make_watershed_bem
+
+
+def run():
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option("-s", "--subject", dest="subject",
+                      help="Subject name (required)", default=None)
+    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
+                      help="Subjects directory", default=None)
+    parser.add_option("-o", "--overwrite", dest="overwrite",
+                      help="Write over existing files", action="store_true")
+    parser.add_option("-v", "--volume", dest="volume",
+                      help="Defaults to T1", default='T1')
+    parser.add_option("-a", "--atlas", dest="atlas",
+                      help="Specify the --atlas option for mri_watershed",
+                      default=False, action="store_true")
+    parser.add_option("-g", "--gcaatlas", dest="gcaatlas",
+                      help="Use the subcortical atlas", default=False,
+                      action="store_true")
+    parser.add_option("-p", "--preflood", dest="preflood",
+                      help="Change the preflood height", default=None)
+    parser.add_option("--verbose", dest="verbose",
+                      help="If not None, override default verbose level",
+                      default=None)
+
+    options, args = parser.parse_args()
+
+    if options.subject is None:
+        parser.print_help()
+        sys.exit(1)
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    overwrite = options.overwrite
+    volume = options.volume
+    atlas = options.atlas
+    gcaatlas = options.gcaatlas
+    preflood = options.preflood
+    verbose = options.verbose
+
+    make_watershed_bem(subject=subject, subjects_dir=subjects_dir,
+                       overwrite=overwrite, volume=volume, atlas=atlas,
+                       gcaatlas=gcaatlas, preflood=preflood, verbose=verbose)
+
+is_main = (__name__ == '__main__')
+if is_main:
+    run()
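
As with the flash command, this script is a thin wrapper; a minimal
sketch of the direct call, with a hypothetical subjects directory and a
configured FreeSurfer environment assumed:

    from mne.bem import make_watershed_bem

    # Runs FreeSurfer's mri_watershed and links the resulting surfaces
    # into the subject's bem directory.
    make_watershed_bem(subject='sample', subjects_dir='/path/to/subjects',
                       overwrite=True, volume='T1')
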
diff --git a/mne/layouts/tests/__init__.py b/mne/commands/tests/__init__.py
similarity index 100%
rename from mne/layouts/tests/__init__.py
rename to mne/commands/tests/__init__.py
diff --git a/mne/commands/tests/test_commands.py b/mne/commands/tests/test_commands.py
new file mode 100644
index 0000000..89574e1
--- /dev/null
+++ b/mne/commands/tests/test_commands.py
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+import os
+from os import path as op
+import shutil
+import glob
+import warnings
+from nose.tools import assert_true, assert_raises
+
+from mne.commands import (mne_browse_raw, mne_bti2fiff, mne_clean_eog_ecg,
+                          mne_compute_proj_ecg, mne_compute_proj_eog,
+                          mne_coreg, mne_flash_bem_model, mne_kit2fiff,
+                          mne_make_scalp_surfaces, mne_maxfilter,
+                          mne_report, mne_surf2bem, mne_watershed_bem,
+                          mne_compare_fiff, mne_flash_bem)
+from mne.utils import (run_tests_if_main, _TempDir, requires_mne, requires_PIL,
+                       requires_mayavi, requires_tvtk, requires_freesurfer,
+                       ArgvSetter, slow_test, ultra_slow_test)
+from mne.io import Raw
+from mne.datasets import testing, sample
+
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+subjects_dir = op.join(testing.data_path(download=False), 'subjects')
+
+warnings.simplefilter('always')
+
+
+def check_usage(module, force_help=False):
+    """Helper to ensure we print usage"""
+    args = ('--help',) if force_help else ()
+    with ArgvSetter(args) as out:
+        try:
+            module.run()
+        except SystemExit:
+            pass
+        assert_true('Usage: ' in out.stdout.getvalue())
+
+
+@slow_test
+def test_browse_raw():
+    """Test mne browse_raw"""
+    check_usage(mne_browse_raw)
+
+
+def test_bti2fiff():
+    """Test mne bti2fiff"""
+    check_usage(mne_bti2fiff)
+
+
+def test_compare_fiff():
+    """Test mne compare_fiff"""
+    check_usage(mne_compare_fiff)
+
+
+@requires_mne
+def test_clean_eog_ecg():
+    """Test mne clean_eog_ecg"""
+    check_usage(mne_clean_eog_ecg)
+    tempdir = _TempDir()
+    raw = Raw([raw_fname, raw_fname, raw_fname])
+    raw.info['bads'] = ['MEG 2443']
+    use_fname = op.join(tempdir, op.basename(raw_fname))
+    raw.save(use_fname)
+    with ArgvSetter(('-i', use_fname, '--quiet')):
+        mne_clean_eog_ecg.run()
+    fnames = glob.glob(op.join(tempdir, '*proj.fif'))
+    assert_true(len(fnames) == 2)  # two projs
+    fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
+    assert_true(len(fnames) == 3)  # raw plus two projs
+
+
+@slow_test
+def test_compute_proj_ecg_eog():
+    """Test mne compute_proj_ecg/eog"""
+    for fun in (mne_compute_proj_ecg, mne_compute_proj_eog):
+        check_usage(fun)
+        tempdir = _TempDir()
+        use_fname = op.join(tempdir, op.basename(raw_fname))
+        bad_fname = op.join(tempdir, 'bads.txt')
+        with open(bad_fname, 'w') as fid:
+            fid.write('MEG 2443\n')
+        shutil.copyfile(raw_fname, use_fname)
+        with ArgvSetter(('-i', use_fname, '--bad=' + bad_fname,
+                         '--rej-eeg', '150')):
+            fun.run()
+        fnames = glob.glob(op.join(tempdir, '*proj.fif'))
+        assert_true(len(fnames) == 1)
+        fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
+        assert_true(len(fnames) == 1)
+
+
+def test_coreg():
+    """Test mne coreg"""
+    assert_true(hasattr(mne_coreg, 'run'))
+
+
+def test_flash_bem_model():
+    """Test mne flash_bem_model"""
+    assert_true(hasattr(mne_flash_bem_model, 'run'))
+    check_usage(mne_flash_bem_model)
+
+
+def test_kit2fiff():
+    """Test mne kit2fiff"""
+    # Can't check the full pipeline here, so only check the --help output
+    check_usage(mne_kit2fiff, force_help=True)
+
+
+@requires_tvtk
+@requires_mne
+@testing.requires_testing_data
+def test_make_scalp_surfaces():
+    """Test mne make_scalp_surfaces"""
+    check_usage(mne_make_scalp_surfaces)
+    # Copy necessary files to avoid FreeSurfer call
+    tempdir = _TempDir()
+    surf_path = op.join(subjects_dir, 'sample', 'surf')
+    surf_path_new = op.join(tempdir, 'sample', 'surf')
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(surf_path_new)
+    os.mkdir(op.join(tempdir, 'sample', 'bem'))
+    shutil.copy(op.join(surf_path, 'lh.seghead'), surf_path_new)
+
+    orig_fs = os.getenv('FREESURFER_HOME', None)
+    orig_mne = os.getenv('MNE_ROOT')
+    if orig_fs is not None:
+        del os.environ['FREESURFER_HOME']
+    cmd = ('-s', 'sample', '--subjects-dir', tempdir)
+    os.environ['_MNE_TESTING_SCALP'] = 'true'
+    try:
+        with ArgvSetter(cmd, disable_stdout=False, disable_stderr=False):
+            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
+            os.environ['FREESURFER_HOME'] = tempdir  # don't need it
+            del os.environ['MNE_ROOT']
+            assert_raises(RuntimeError, mne_make_scalp_surfaces.run)
+            os.environ['MNE_ROOT'] = orig_mne
+            mne_make_scalp_surfaces.run()
+            assert_raises(IOError, mne_make_scalp_surfaces.run)  # no overwrite
+    finally:
+        if orig_fs is not None:
+            os.environ['FREESURFER_HOME'] = orig_fs
+        os.environ['MNE_ROOT'] = orig_mne
+        del os.environ['_MNE_TESTING_SCALP']
+
+
+def test_maxfilter():
+    """Test mne maxfilter"""
+    check_usage(mne_maxfilter)
+    with ArgvSetter(('-i', raw_fname, '--st', '--movecomp', '--linefreq', '60',
+                     '--trans', raw_fname)) as out:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            os.environ['_MNE_MAXFILTER_TEST'] = 'true'
+            try:
+                mne_maxfilter.run()
+            finally:
+                del os.environ['_MNE_MAXFILTER_TEST']
+        assert_true(len(w) == 1)
+        for check in ('maxfilter', '-trans', '-movecomp'):
+            assert_true(check in out.stdout.getvalue(), check)
+
+
+@slow_test
+@requires_mayavi
+@requires_PIL
+@testing.requires_testing_data
+def test_report():
+    """Test mne report"""
+    check_usage(mne_report)
+    tempdir = _TempDir()
+    use_fname = op.join(tempdir, op.basename(raw_fname))
+    shutil.copyfile(raw_fname, use_fname)
+    with ArgvSetter(('-p', tempdir, '-i', use_fname, '-d', subjects_dir,
+                     '-s', 'sample', '--no-browser', '-m', '30')):
+        mne_report.run()
+    fnames = glob.glob(op.join(tempdir, '*.html'))
+    assert_true(len(fnames) == 1)
+
+
+def test_surf2bem():
+    """Test mne surf2bem"""
+    check_usage(mne_surf2bem)
+
+
+@ultra_slow_test
+@requires_freesurfer
+@testing.requires_testing_data
+def test_watershed_bem():
+    """Test mne watershed bem"""
+    check_usage(mne_watershed_bem)
+    # Copy necessary files to tempdir
+    tempdir = _TempDir()
+    mridata_path = op.join(subjects_dir, 'sample', 'mri')
+    mridata_path_new = op.join(tempdir, 'sample', 'mri')
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(mridata_path_new)
+    if op.exists(op.join(mridata_path, 'T1')):
+        shutil.copytree(op.join(mridata_path, 'T1'), op.join(mridata_path_new,
+                        'T1'))
+    if op.exists(op.join(mridata_path, 'T1.mgz')):
+        shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
+                        op.join(mridata_path_new, 'T1.mgz'))
+
+    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-o'),
+                    disable_stdout=False, disable_stderr=False):
+        mne_watershed_bem.run()
+
+
+@slow_test
+@requires_mne
+@requires_freesurfer
+@sample.requires_sample_data
+def test_flash_bem():
+    """Test mne flash_bem"""
+    check_usage(mne_flash_bem, force_help=True)
+    # Using the sample dataset
+    subjects_dir = op.join(sample.data_path(download=False), 'subjects')
+    # Copy necessary files to tempdir
+    tempdir = _TempDir()
+    mridata_path = op.join(subjects_dir, 'sample', 'mri')
+    mridata_path_new = op.join(tempdir, 'sample', 'mri')
+    os.makedirs(op.join(mridata_path_new, 'flash'))
+    os.makedirs(op.join(tempdir, 'sample', 'bem'))
+    shutil.copyfile(op.join(mridata_path, 'T1.mgz'),
+                    op.join(mridata_path_new, 'T1.mgz'))
+    shutil.copyfile(op.join(mridata_path, 'brain.mgz'),
+                    op.join(mridata_path_new, 'brain.mgz'))
+    # Copy the available mri/flash/mef*.mgz files from the dataset
+    files = glob.glob(op.join(mridata_path, 'flash', 'mef*.mgz'))
+    for infile in files:
+        shutil.copyfile(infile, op.join(mridata_path_new, 'flash',
+                                        op.basename(infile)))
+    # Test mne flash_bem with --noconvert option
+    # (since there are no DICOM Flash images in dataset)
+    currdir = os.getcwd()
+    with ArgvSetter(('-d', tempdir, '-s', 'sample', '-n'),
+                    disable_stdout=False, disable_stderr=False):
+        mne_flash_bem.run()
+    os.chdir(currdir)
+
+
+run_tests_if_main()
diff --git a/mne/commands/utils.py b/mne/commands/utils.py
index 2e97e03..2957300 100644
--- a/mne/commands/utils.py
+++ b/mne/commands/utils.py
@@ -1,5 +1,3 @@
-#emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- 
-#ex: set sts=4 ts=4 sw=4 noet:
 """Some utility functions for commands (e.g. for cmdline handling)
 """
 
@@ -7,21 +5,28 @@
 #
 # License: BSD (3-clause)
 
-import imp, os, re
+import imp
+import os
+import re
 from optparse import OptionParser
-from subprocess import Popen, PIPE
 
 import mne
 
-def get_optparser(cmdpath):
+
+def get_optparser(cmdpath, usage=None):
     """Create OptionParser with cmd source specific settings (e.g. prog value)
     """
     command = os.path.basename(cmdpath)
     if re.match('mne_(.*).py', command):
         command = command[4:-3]
+    elif re.match('mne_(.*).pyc', command):
+        command = command[4:-4]
 
     # Fetch description
-    mod = imp.load_source('__temp', cmdpath)
+    if cmdpath.endswith('.pyc'):
+        mod = imp.load_compiled('__temp', cmdpath)
+    else:
+        mod = imp.load_source('__temp', cmdpath)
     if mod.__doc__:
         doc, description, epilog = mod.__doc__, None, None
 
@@ -35,14 +40,6 @@ def get_optparser(cmdpath):
     parser = OptionParser(prog="mne %s" % command,
                           version=mne.__version__,
                           description=description,
-                          epilog=epilog)
+                          epilog=epilog, usage=usage)
 
     return parser
-
-def get_status_output(cmd):
-    """ Replacement for commands.getstatusoutput which has been deprecated since 2.6
-        Returns the error status, output and error output"""
-    pipe = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
-    output, error = pipe.communicate()
-    status = pipe.returncode
-    return status, output, error
diff --git a/mne/connectivity/effective.py b/mne/connectivity/effective.py
index ad3e085..636661b 100644
--- a/mne/connectivity/effective.py
+++ b/mne/connectivity/effective.py
@@ -42,10 +42,9 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
 
     Parameters
     ----------
-    data : array, shape=(n_epochs, n_signals, n_times)
-           or list/generator of array, shape =(n_signals, n_times)
-           or list/generator of SourceEstimate
-           or Epochs
+    data : array-like, shape=(n_epochs, n_signals, n_times)
+        Can also be a list/generator of array, shape=(n_signals, n_times);
+        list/generator of SourceEstimate; or Epochs.
         The data from which to compute connectivity. Note that it is also
         possible to combine multiple signals by providing a list of tuples,
         e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
@@ -115,9 +114,9 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
     """
     logger.info('Estimating phase slope index (PSI)')
     # estimate the coherency
-    cohy, freqs_, times, n_epochs, n_tapers = spectral_connectivity(data,
-        method='cohy', indices=indices, sfreq=sfreq, mode=mode, fmin=fmin,
-        fmax=fmax, fskip=0, faverage=False, tmin=tmin, tmax=tmax,
+    cohy, freqs_, times, n_epochs, n_tapers = spectral_connectivity(
+        data, method='cohy', indices=indices, sfreq=sfreq, mode=mode,
+        fmin=fmin, fmax=fmax, fskip=0, faverage=False, tmin=tmin, tmax=tmax,
         mt_bandwidth=mt_bandwidth, mt_adaptive=mt_adaptive,
         mt_low_bias=mt_low_bias, cwt_frequencies=cwt_frequencies,
         cwt_n_cycles=cwt_n_cycles, block_size=block_size, n_jobs=n_jobs,
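
A minimal usage sketch of phase_slope_index on synthetic data, matching
the shapes documented above; the sampling frequency and frequency band
are arbitrary choices:

    import numpy as np
    from mne.connectivity import phase_slope_index

    data = np.random.randn(5, 3, 256)  # (n_epochs, n_signals, n_times)
    indices = (np.array([0, 0]),       # seed indices
               np.array([1, 2]))       # target indices
    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(
        data, indices=indices, sfreq=128., fmin=8., fmax=13.)
    print(psi.shape)  # (2, 1): one value per connection for the band
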
diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py
index 2b4957c..264d25a 100644
--- a/mne/connectivity/spectral.py
+++ b/mne/connectivity/spectral.py
@@ -18,7 +18,7 @@ from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
                                          _psd_from_mt, _csd_from_mt,
                                          _psd_from_mt_adaptive)
 from ..time_frequency.tfr import morlet, cwt
-from ..utils import logger, verbose
+from ..utils import logger, verbose, _time_mask
 
 ########################################################################
 # Various connectivity estimators
@@ -181,7 +181,7 @@ class _WPLIEst(_EpochMeanConEstBase):
     def __init__(self, n_cons, n_freqs, n_times):
         super(_WPLIEst, self).__init__(n_cons, n_freqs, n_times)
 
-        #store  both imag(csd) and abs(imag(csd))
+        # store both imag(csd) and abs(imag(csd))
         acc_shape = (2,) + self.csd_shape
         self._acc = np.zeros(acc_shape)
 
@@ -217,7 +217,7 @@ class _WPLIDebiasedEst(_EpochMeanConEstBase):
 
     def __init__(self, n_cons, n_freqs, n_times):
         super(_WPLIDebiasedEst, self).__init__(n_cons, n_freqs, n_times)
-        #store imag(csd), abs(imag(csd)), imag(csd)^2
+        # store imag(csd), abs(imag(csd)), imag(csd)^2
         acc_shape = (3,) + self.csd_shape
         self._acc = np.zeros(acc_shape)
 
@@ -260,7 +260,7 @@ class _PPCEst(_EpochMeanConEstBase):
     def __init__(self, n_cons, n_freqs, n_times):
         super(_PPCEst, self).__init__(n_cons, n_freqs, n_times)
 
-        #store csd / abs(csd)
+        # store csd / abs(csd)
         self._acc = np.zeros(self.csd_shape, dtype=np.complex128)
 
     def accumulate(self, con_idx, csd_xy):
@@ -280,18 +280,19 @@ class _PPCEst(_EpochMeanConEstBase):
 
         # note: we use the trick from fieldtrip to compute the
         # the estimate over all pairwise epoch combinations
-        con = ((self._acc[con_idx] * np.conj(self._acc[con_idx]) - n_epochs)
-               / (n_epochs * (n_epochs - 1.)))
+        con = ((self._acc[con_idx] * np.conj(self._acc[con_idx]) - n_epochs) /
+               (n_epochs * (n_epochs - 1.)))
 
         self.con_scores[con_idx] = np.real(con)
 
 
 ###############################################################################
 def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
-                                 mode, window_fun, eigvals, wavelets, freq_mask,
-                                 mt_adaptive, idx_map, block_size, psd,
-                                 accumulate_psd, con_method_types, con_methods,
-                                 n_signals, n_times, accumulate_inplace=True):
+                                 mode, window_fun, eigvals, wavelets,
+                                 freq_mask, mt_adaptive, idx_map, block_size,
+                                 psd, accumulate_psd, con_method_types,
+                                 con_methods, n_signals, n_times,
+                                 accumulate_inplace=True):
     """Connectivity estimation for one epoch see spectral_connectivity"""
 
     n_cons = len(idx_map[0])
@@ -321,16 +322,16 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
             this_n_sig = this_data.shape[0]
             sig_pos_end = sig_pos_start + this_n_sig
             if not isinstance(sig_idx, slice):
-                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start)
-                                & (sig_idx < sig_pos_end)] - sig_pos_start
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
+                                       (sig_idx < sig_pos_end)] - sig_pos_start
             else:
                 this_sig_idx = sig_idx
             if isinstance(this_data, _BaseSourceEstimate):
                 _mt_spectra_partial = partial(_mt_spectra, dpss=window_fun,
                                               sfreq=sfreq)
-                this_x_mt = this_data.transform_data(_mt_spectra_partial,
-                                        idx=this_sig_idx, tmin_idx=tmin_idx,
-                                        tmax_idx=tmax_idx)
+                this_x_mt = this_data.transform_data(
+                    _mt_spectra_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
+                    tmax_idx=tmax_idx)
             else:
                 this_x_mt, _ = _mt_spectra(this_data[this_sig_idx,
                                                      tmin_idx:tmax_idx],
@@ -338,8 +339,8 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
 
             if mt_adaptive:
                 # compute PSD and adaptive weights
-                _this_psd, weights = _psd_from_mt_adaptive(this_x_mt, eigvals,
-                                        freq_mask, return_weights=True)
+                _this_psd, weights = _psd_from_mt_adaptive(
+                    this_x_mt, eigvals, freq_mask, return_weights=True)
 
                 # only keep freqs of interest
                 this_x_mt = this_x_mt[:, :, freq_mask]
@@ -375,22 +376,22 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
             this_n_sig = this_data.shape[0]
             sig_pos_end = sig_pos_start + this_n_sig
             if not isinstance(sig_idx, slice):
-                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start)
-                    & (sig_idx < sig_pos_end)] - sig_pos_start
+                this_sig_idx = sig_idx[(sig_idx >= sig_pos_start) &
+                                       (sig_idx < sig_pos_end)] - sig_pos_start
             else:
                 this_sig_idx = sig_idx
             if isinstance(this_data, _BaseSourceEstimate):
                 cwt_partial = partial(cwt, Ws=wavelets, use_fft=True,
                                       mode='same')
-                this_x_cwt = this_data.transform_data(cwt_partial,
-                                idx=this_sig_idx, tmin_idx=tmin_idx,
-                                tmax_idx=tmax_idx)
+                this_x_cwt = this_data.transform_data(
+                    cwt_partial, idx=this_sig_idx, tmin_idx=tmin_idx,
+                    tmax_idx=tmax_idx)
             else:
                 this_x_cwt = cwt(this_data[this_sig_idx, tmin_idx:tmax_idx],
                                  wavelets, use_fft=True, mode='same')
 
             if accumulate_psd:
-                this_psd.append(np.abs(this_x_cwt) ** 2)
+                this_psd.append((this_x_cwt * this_x_cwt.conj()).real)
 
             x_cwt.append(this_x_cwt)
 
@@ -437,8 +438,8 @@ def _epoch_spectral_connectivity(data, sig_idx, tmin_idx, tmax_idx, sfreq,
         for i in range(0, n_cons, block_size):
             con_idx = slice(i, i + block_size)
 
-            csd = x_cwt[idx_map[0][con_idx]]\
-                  * np.conjugate(x_cwt[idx_map[1][con_idx]])
+            csd = x_cwt[idx_map[0][con_idx]] * \
+                np.conjugate(x_cwt[idx_map[1][con_idx]])
             for method in con_methods:
                 method.accumulate(con_idx, csd)
 
@@ -518,8 +519,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                           mt_low_bias=True, cwt_frequencies=None,
                           cwt_n_cycles=7, block_size=1000, n_jobs=1,
                           verbose=None):
-    """Compute various frequency-domain and time-frequency domain connectivity
-    measures.
+    """Compute frequency-domain and time-frequency domain connectivity measures
 
     The connectivity method(s) are specified using the "method" parameter.
     All methods are based on estimates of the cross- and power spectral
@@ -536,96 +536,98 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
     connectivity matrix). If one is only interested in the connectivity
     between some signals, the "indices" parameter can be used. For example,
     to compute the connectivity between the signal with index 0 and signals
-    "2, 3, 4" (a total of 3 connections) one can use the following:
+    "2, 3, 4" (a total of 3 connections) one can use the following::
 
-    indices = (np.array([0, 0, 0],    # row indices
-               np.array([2, 3, 4])))  # col indices
+        indices = (np.array([0, 0, 0]),    # row indices
+                   np.array([2, 3, 4]))    # col indices
 
-    con_flat = spectral_connectivity(data, method='coh', indices=indices, ...)
+        con_flat = spectral_connectivity(data, method='coh',
+                                         indices=indices, ...)
 
     In this case con_flat.shape = (3, n_freqs). The connectivity scores are
     in the same order as defined indices.
 
-    Supported Connectivity Measures:
+    **Supported Connectivity Measures**
 
     The connectivity method(s) is specified using the "method" parameter. The
-    following methods are supported (note: E[] denotes average over epochs).
-    Multiple measures can be computed at once by using a list/tuple, e.g.
-    "['coh', 'pli']" to compute coherence and PLI.
+    following methods are supported (note: ``E[]`` denotes average over
+    epochs). Multiple measures can be computed at once by using a list/tuple,
+    e.g., ``['coh', 'pli']`` to compute coherence and PLI.
 
-    'coh' : Coherence given by
+        'coh' : Coherence given by::
 
-                 | E[Sxy] |
-        C = ---------------------
-            sqrt(E[Sxx] * E[Syy])
+                     | E[Sxy] |
+            C = ---------------------
+                sqrt(E[Sxx] * E[Syy])
 
-    'cohy' : Coherency given by
+        'cohy' : Coherency given by::
 
-                   E[Sxy]
-        C = ---------------------
-            sqrt(E[Sxx] * E[Syy])
+                       E[Sxy]
+            C = ---------------------
+                sqrt(E[Sxx] * E[Syy])
 
-    'imcoh' : Imaginary coherence [1] given by
+        'imcoh' : Imaginary coherence [1]_ given by::
 
-                  Im(E[Sxy])
-        C = ----------------------
-            sqrt(E[Sxx] * E[Syy])
+                      Im(E[Sxy])
+            C = ----------------------
+                sqrt(E[Sxx] * E[Syy])
 
-    'plv' : Phase-Locking Value (PLV) [2] given by
+        'plv' : Phase-Locking Value (PLV) [2]_ given by::
 
-        PLV = |E[Sxy/|Sxy|]|
+            PLV = |E[Sxy/|Sxy|]|
 
-    'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator of squared
-            PLV [3].
+        'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator
+        of squared PLV [3]_.
 
-    'pli' : Phase Lag Index (PLI) [4] given by
+        'pli' : Phase Lag Index (PLI) [4]_ given by::
 
-        PLI = |E[sign(Im(Sxy))]|
+            PLI = |E[sign(Im(Sxy))]|
 
-    'pli2_unbiased' : Unbiased estimator of squared PLI [5].
+        'pli2_unbiased' : Unbiased estimator of squared PLI [5]_.
 
-    'wpli' : Weighted Phase Lag Index (WPLI) [5] given by
+        'wpli' : Weighted Phase Lag Index (WPLI) [5]_ given by::
 
-                  |E[Im(Sxy)]|
-        WPLI = ------------------
-                  E[|Im(Sxy)|]
+                      |E[Im(Sxy)]|
+            WPLI = ------------------
+                      E[|Im(Sxy)|]
+
+        'wpli2_debiased' : Debiased estimator of squared WPLI [5]_.
 
-    'wpli2_debiased' : Debiased estimator of squared WPLI [5].
 
     References
     ----------
 
-    [1] Nolte et al. "Identifying true brain interaction from EEG data using
-        the imaginary part of coherency" Clinical neurophysiology, vol. 115,
-        no. 10, pp. 2292-2307, Oct. 2004.
+    .. [1] Nolte et al. "Identifying true brain interaction from EEG data using
+           the imaginary part of coherency" Clinical neurophysiology, vol. 115,
+           no. 10, pp. 2292-2307, Oct. 2004.
 
-    [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human brain
-        mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
+    .. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human
+           brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999.
 
-    [3] Vinck et al. "The pairwise phase consistency: a bias-free measure of
-        rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
-        pp. 112-122, May 2010.
+    .. [3] Vinck et al. "The pairwise phase consistency: a bias-free measure of
+           rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1,
+           pp. 112-122, May 2010.
 
-    [4] Stam et al. "Phase lag index: assessment of functional connectivity
-        from multi channel EEG and MEG with diminished bias from common
-        sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
-        Nov. 2007.
+    .. [4] Stam et al. "Phase lag index: assessment of functional connectivity
+           from multi channel EEG and MEG with diminished bias from common
+           sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193,
+           Nov. 2007.
 
-    [5] Vinck et al. "An improved index of phase-synchronization for electro-
-        physiological data in the presence of volume-conduction, noise and
-        sample-size bias" NeuroImage, vol. 55, no. 4, pp. 1548-1565, Apr. 2011.
+    .. [5] Vinck et al. "An improved index of phase-synchronization for
+           electro-physiological data in the presence of volume-conduction,
+           noise and sample-size bias" NeuroImage, vol. 55, no. 4,
+           pp. 1548-1565, Apr. 2011.
 
     Parameters
     ----------
-    data : array, shape=(n_epochs, n_signals, n_times)
-           or list/generator of array, shape =(n_signals, n_times)
-           or list/generator of SourceEstimate or VolSourceEstimate
-           or Epochs
+    data : array-like, shape=(n_epochs, n_signals, n_times) | Epochs
         The data from which to compute connectivity. Note that it is also
         possible to combine multiple signals by providing a list of tuples,
         e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
         which corresponds to 3 epochs; arr_* could be an array with the same
-        number of time points as stc_*.
+        number of time points as stc_*. The array-like object can also
+        be a list/generator of arrays of shape (n_signals, n_times),
+        or a list/generator of SourceEstimate or VolSourceEstimate objects.
     method : string | list of string
         Connectivity measure(s) to compute.
     indices : tuple of arrays | None
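
For orientation, the 'coh' definition above can be reproduced directly from
per-epoch cross-spectra; a minimal numpy sketch on white-noise toy data (an
illustration only, not the code path patched here)::

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(10, 256)  # (n_epochs, n_times), signal x
    y = rng.randn(10, 256)  # (n_epochs, n_times), signal y
    X, Y = np.fft.rfft(x), np.fft.rfft(y)
    Sxy = (X * np.conj(Y)).mean(axis=0)     # E[Sxy]
    Sxx = (np.abs(X) ** 2).mean(axis=0)     # E[Sxx]
    Syy = (np.abs(Y) ** 2).mean(axis=0)     # E[Syy]
    coh = np.abs(Sxy) / np.sqrt(Sxx * Syy)  # C = |E[Sxy]| / sqrt(E[Sxx] E[Syy])
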
@@ -705,8 +707,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
     """
     if n_jobs > 1:
         parallel, my_epoch_spectral_connectivity, _ = \
-                parallel_func(_epoch_spectral_connectivity, n_jobs,
-                              verbose=verbose)
+            parallel_func(_epoch_spectral_connectivity, n_jobs,
+                          verbose=verbose)
 
     # format fmin and fmax and check inputs
     if fmin is None:
@@ -746,12 +748,12 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                    for mtype in con_method_types]
 
     # we only support 3 or 5 arguments
-    if any([n not in (3, 5) for n in n_comp_args]):
+    if any(n not in (3, 5) for n in n_comp_args):
         raise ValueError('The compute_con function needs to have either '
                          '3 or 5 arguments')
 
     # if none of the comp_con functions needs the PSD, we don't estimate it
-    accumulate_psd = any([n == 5 for n in n_comp_args])
+    accumulate_psd = any(n == 5 for n in n_comp_args)
 
     if isinstance(data, Epochs):
         times_in = data.times  # input times for Epochs input type
@@ -768,8 +770,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
             first_epoch = epoch_block[0]
 
             # get the data size and time scale
-            n_signals, n_times_in, times_in =\
-                    _get_and_verify_data_sizes(first_epoch)
+            n_signals, n_times_in, times_in = \
+                _get_and_verify_data_sizes(first_epoch)
 
             if times_in is None:
                 # we are not using Epochs or SourceEstimate(s) as input
@@ -777,16 +779,11 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                                        endpoint=False)
 
             n_times_in = len(times_in)
-            tmin_idx = 0
-            tmax_idx = n_times_in
-            tmin_true = times_in[0]
-            tmax_true = times_in[-1]
-            if tmin is not None:
-                tmin_idx = np.argmin(np.abs(times_in - tmin))
-                tmin_true = times_in[tmin_idx]
-            if tmax is not None:
-                tmax_idx = np.argmin(np.abs(times_in - tmax)) + 1
-                tmax_true = times_in[tmax_idx - 1]  # time of last point used
+            mask = _time_mask(times_in, tmin, tmax)
+            tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]]
+            tmax_idx += 1
+            tmin_true = times_in[tmin_idx]
+            tmax_true = times_in[tmax_idx - 1]  # time of last point used
 
             times = times_in[tmin_idx:tmax_idx]
             n_times = len(times)
@@ -859,9 +856,10 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
             for i, n_f_band in enumerate([len(f) for f in freqs_bands]):
                 if n_f_band == 0:
                     raise ValueError('There are no frequency points between '
-                        '%0.1fHz and %0.1fHz. Change the band specification '
-                        '(fmin, fmax) or the frequency resolution.'
-                        % (fmin[i], fmax[i]))
+                                     '%0.1fHz and %0.1fHz. Change the band '
+                                     'specification (fmin, fmax) or the '
+                                     'frequency resolution.'
+                                     % (fmin[i], fmax[i]))
 
             if n_bands == 1:
                 logger.info('    frequencies: %0.1fHz..%0.1fHz (%d points)'
@@ -872,7 +870,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                 for i, bfreqs in enumerate(freqs_bands):
                     logger.info('     band %d: %0.1fHz..%0.1fHz '
                                 '(%d points)' % (i + 1, bfreqs[0],
-                                bfreqs[-1], len(bfreqs)))
+                                                 bfreqs[-1], len(bfreqs)))
 
             if faverage:
                 logger.info('    connectivity scores will be averaged for '
@@ -922,7 +920,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                 if len(cwt_n_cycles) > 1:
                     if len(cwt_n_cycles) != len(cwt_frequencies):
                         raise ValueError('cwt_n_cycles must be float or an '
-                            'array with the same size as cwt_frequencies')
+                                         'array with the same size as '
+                                         'cwt_frequencies')
                     cwt_n_cycles = cwt_n_cycles[freq_mask]
 
                 # get the Morlet wavelets
@@ -956,7 +955,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                            for mtype in con_method_types]
 
             sep = ', '
-            metrics_str = sep.join([method.name for method in con_methods])
+            metrics_str = sep.join([meth.name for meth in con_methods])
             logger.info('    the following metrics will be computed: %s'
                         % metrics_str)
 
@@ -972,7 +971,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
                             % (epoch_idx + 1))
 
                 # con methods and psd are updated inplace
-                _epoch_spectral_connectivity(this_epoch, sig_idx, tmin_idx,
+                _epoch_spectral_connectivity(
+                    this_epoch, sig_idx, tmin_idx,
                     tmax_idx, sfreq, mode, window_fun, eigvals, wavelets,
                     freq_mask, mt_adaptive, idx_map, block_size, psd,
                     accumulate_psd, con_method_types, con_methods,
@@ -983,11 +983,12 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
             logger.info('    computing connectivity for epochs %d..%d'
                         % (epoch_idx + 1, epoch_idx + len(epoch_block)))
 
-            out = parallel(my_epoch_spectral_connectivity(this_epoch, sig_idx,
-                    tmin_idx, tmax_idx, sfreq, mode, window_fun, eigvals,
-                    wavelets, freq_mask, mt_adaptive, idx_map, block_size, psd,
-                    accumulate_psd, con_method_types, None, n_signals, n_times,
-                    accumulate_inplace=False) for this_epoch in epoch_block)
+            out = parallel(my_epoch_spectral_connectivity(
+                this_epoch, sig_idx,
+                tmin_idx, tmax_idx, sfreq, mode, window_fun, eigvals,
+                wavelets, freq_mask, mt_adaptive, idx_map, block_size, psd,
+                accumulate_psd, con_method_types, None, n_signals, n_times,
+                accumulate_inplace=False) for this_epoch in epoch_block)
 
             # do the accumulation
             for this_out in out:
@@ -1042,8 +1043,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
         con_flat = con
         con = []
         for this_con_flat in con_flat:
-            this_con = np.zeros((n_signals, n_signals)
-                                + this_con_flat.shape[1:],
+            this_con = np.zeros((n_signals, n_signals) +
+                                this_con_flat.shape[1:],
                                 dtype=this_con_flat.dtype)
             this_con[indices_use] = this_con_flat
             con.append(this_con)
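
A minimal usage sketch of the list-of-methods API reworked above, assuming the
0.10-era ``mne.connectivity`` namespace (shapes and values are illustrative)::

    import numpy as np
    from mne.connectivity import spectral_connectivity

    data = np.random.RandomState(0).randn(8, 3, 256)  # (epochs, signals, times)
    con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
        data, method=['coh', 'pli'], mode='multitaper', sfreq=50.)
    # con is a list with one (n_signals, n_signals, n_freqs) array per method
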
diff --git a/mne/connectivity/tests/test_effective.py b/mne/connectivity/tests/test_effective.py
index 13f5072..2615f53 100644
--- a/mne/connectivity/tests/test_effective.py
+++ b/mne/connectivity/tests/test_effective.py
@@ -19,21 +19,21 @@ def test_psi():
         data[i, 1, 10:] = data[i, 0, :-10]  # signal 0 is ahead
         data[i, 2, :-10] = data[i, 0, 10:]  # signal 2 is ahead
 
-    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
-        mode='fourier', sfreq=sfreq)
+    psi, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='fourier', sfreq=sfreq)
     assert_true(psi[1, 0, 0] < 0)
     assert_true(psi[2, 0, 0] > 0)
 
     indices = (np.array([0]), np.array([1]))
-    psi_2, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
-        mode='fourier', sfreq=sfreq, indices=indices)
+    psi_2, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='fourier', sfreq=sfreq, indices=indices)
 
     # the measure is symmetric (sign flip)
     assert_array_almost_equal(psi_2[0, 0], -psi[1, 0, 0])
 
     cwt_freqs = np.arange(5., 20, 0.5)
-    psi_cwt, freqs, times, n_epochs, n_tapers = phase_slope_index(data,
-        mode='cwt_morlet', sfreq=sfreq, cwt_frequencies=cwt_freqs,
+    psi_cwt, freqs, times, n_epochs, n_tapers = phase_slope_index(
+        data, mode='cwt_morlet', sfreq=sfreq, cwt_frequencies=cwt_freqs,
         indices=indices)
 
     assert_true(np.all(psi_cwt > 0))
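
The lag construction in this test doubles as a sanity check on the PSI sign; a
self-contained sketch under the same assumptions (parameters illustrative)::

    import numpy as np
    from mne.connectivity import phase_slope_index

    rng = np.random.RandomState(42)
    data = rng.randn(5, 2, 500)         # (epochs, signals, times)
    data[:, 1, 10:] = data[:, 0, :-10]  # signal 0 leads signal 1
    psi = phase_slope_index(data, mode='fourier', sfreq=50.)[0]
    # as asserted above, psi[1, 0, 0] < 0 when signal 0 is ahead
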
diff --git a/mne/connectivity/tests/test_spectral.py b/mne/connectivity/tests/test_spectral.py
index 3ce45eb..8678f5b 100644
--- a/mne/connectivity/tests/test_spectral.py
+++ b/mne/connectivity/tests/test_spectral.py
@@ -1,14 +1,20 @@
+import os
 import numpy as np
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true, assert_raises
+from nose.plugins.skip import SkipTest
+import warnings
 
 from mne.fixes import tril_indices
 from mne.connectivity import spectral_connectivity
 from mne.connectivity.spectral import _CohEst
 
 from mne import SourceEstimate
+from mne.utils import run_tests_if_main, slow_test
 from mne.filter import band_pass_filter
 
+warnings.simplefilter('always')
+
 
 def _stc_gen(data, sfreq, tmin, combo=False):
     """Simulate a SourceEstimate generator"""
@@ -26,16 +32,21 @@ def _stc_gen(data, sfreq, tmin, combo=False):
             yield (arr, stc)
 
 
+@slow_test
 def test_spectral_connectivity():
     """Test frequency-domain connectivity methods"""
+    # XXX For some reason on 14 Oct 2015 Travis started timing out on this
+    # test, so for a quick workaround we will skip it:
+    if os.getenv('TRAVIS', 'false') == 'true':
+        raise SkipTest('Travis is broken')
     # Use a case known to have no spurious correlations (it would be bad if
     # nosetests could randomly fail):
     np.random.seed(0)
 
     sfreq = 50.
     n_signals = 3
-    n_epochs = 10
-    n_times = 500
+    n_epochs = 8
+    n_times = 256
 
     tmin = 0.
     tmax = (n_times - 1) / sfreq
@@ -44,7 +55,10 @@ def test_spectral_connectivity():
     # simulate connectivity from 5Hz..15Hz
     fstart, fend = 5.0, 15.0
     for i in range(n_epochs):
-        data[i, 1, :] = band_pass_filter(data[i, 0, :], sfreq, fstart, fend)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            data[i, 1, :] = band_pass_filter(data[i, 0, :],
+                                             sfreq, fstart, fend)
         # add some noise, so the spectrum is not exactly zero
         data[i, 1, :] += 1e-2 * np.random.randn(n_times)
 
@@ -62,8 +76,8 @@ def test_spectral_connectivity():
     assert_raises(ValueError, spectral_connectivity, data, fmin=(11,),
                   fmax=(12, 15))
 
-    methods = ['coh', 'imcoh', 'cohy', 'plv', 'ppc', 'pli', 'pli2_unbiased',
-               'wpli', 'wpli2_debiased', 'coh']
+    methods = ['coh', 'cohy', 'imcoh', ['plv', 'ppc', 'pli', 'pli2_unbiased',
+               'wpli', 'wpli2_debiased', 'coh']]
 
     modes = ['multitaper', 'fourier', 'cwt_morlet']
 
@@ -91,12 +105,11 @@ def test_spectral_connectivity():
                 else:
                     mt_bandwidth = None
 
-                con, freqs, times, n, _ = spectral_connectivity(data,
-                        method=method, mode=mode,
-                        indices=None, sfreq=sfreq, mt_adaptive=adaptive,
-                        mt_low_bias=True, mt_bandwidth=mt_bandwidth,
-                        cwt_frequencies=cwt_frequencies,
-                        cwt_n_cycles=cwt_n_cycles)
+                con, freqs, times, n, _ = spectral_connectivity(
+                    data, method=method, mode=mode, indices=None, sfreq=sfreq,
+                    mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles)
 
                 assert_true(n == n_epochs)
                 assert_array_almost_equal(times_data, times)
@@ -122,18 +135,18 @@ def test_spectral_connectivity():
                 elif method == 'cohy':
                     idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                     # imaginary coh will be zero
-                    assert_true(np.all(np.imag(con[1, 0, idx[0]:idx[1]])
-                                < lower_t))
+                    assert_true(np.all(np.imag(con[1, 0, idx[0]:idx[1]]) <
+                                lower_t))
                     # we see something for zero-lag
-                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]])
-                                > upper_t))
+                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]]) >
+                                upper_t))
 
                     idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
                     if mode != 'cwt_morlet':
-                        assert_true(np.all(np.abs(con[1, 0, :idx[0]])
-                                    < lower_t))
-                        assert_true(np.all(np.abs(con[1, 0, idx[1]:])
-                                    < lower_t))
+                        assert_true(np.all(np.abs(con[1, 0, :idx[0]]) <
+                                    lower_t))
+                        assert_true(np.all(np.abs(con[1, 0, idx[1]:]) <
+                                    lower_t))
                 elif method == 'imcoh':
                     idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                     # imaginary coh will be zero
@@ -142,53 +155,73 @@ def test_spectral_connectivity():
                     assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
                     assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
 
-                # compute same connections using indices and 2 jobs,
-                # also add a second method
+                # compute same connections using indices and 2 jobs
                 indices = tril_indices(n_signals, -1)
 
-                test_methods = (method, _CohEst)
-                combo = True if method == 'coh' else False
+                if not isinstance(method, list):
+                    test_methods = (method, _CohEst)
+                else:
+                    test_methods = method
+
                 stc_data = _stc_gen(data, sfreq, tmin)
-                con2, freqs2, times2, n2, _ = spectral_connectivity(stc_data,
-                        method=test_methods, mode=mode, indices=indices,
-                        sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
-                        mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
-                        cwt_frequencies=cwt_frequencies,
-                        cwt_n_cycles=cwt_n_cycles, n_jobs=2)
+                con2, freqs2, times2, n2, _ = spectral_connectivity(
+                    stc_data, method=test_methods, mode=mode, indices=indices,
+                    sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
+                    cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles, n_jobs=2)
 
                 assert_true(isinstance(con2, list))
-                assert_true(len(con2) == 2)
+                assert_true(len(con2) == len(test_methods))
 
                 if method == 'coh':
                     assert_array_almost_equal(con2[0], con2[1])
 
-                con2 = con2[0]  # only keep the first method
+                if not isinstance(method, list):
+                    con2 = con2[0]  # only keep the first method
 
-                # we get the same result for the probed connections
-                assert_array_almost_equal(freqs, freqs2)
-                assert_array_almost_equal(con[indices], con2)
-                assert_true(n == n2)
-                assert_array_almost_equal(times_data, times2)
+                    # we get the same result for the probed connections
+                    assert_array_almost_equal(freqs, freqs2)
+                    assert_array_almost_equal(con[indices], con2)
+                    assert_true(n == n2)
+                    assert_array_almost_equal(times_data, times2)
+                else:
+                    # we get the same result for the probed connections
+                    assert_true(len(con) == len(con2))
+                    for c, c2 in zip(con, con2):
+                        assert_array_almost_equal(freqs, freqs2)
+                        assert_array_almost_equal(c[indices], c2)
+                        assert_true(n == n2)
+                        assert_array_almost_equal(times_data, times2)
 
                 # compute same connections for two bands, fskip=1, and f. avg.
                 fmin = (5., 15.)
                 fmax = (15., 30.)
-                con3, freqs3, times3, n3, _ = spectral_connectivity(data,
-                        method=method, mode=mode,
-                        indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax,
-                        fskip=1, faverage=True, mt_adaptive=adaptive,
-                        mt_low_bias=True, mt_bandwidth=mt_bandwidth,
-                        cwt_frequencies=cwt_frequencies,
-                        cwt_n_cycles=cwt_n_cycles)
+                con3, freqs3, times3, n3, _ = spectral_connectivity(
+                    data, method=method, mode=mode, indices=indices,
+                    sfreq=sfreq, fmin=fmin, fmax=fmax, fskip=1, faverage=True,
+                    mt_adaptive=adaptive, mt_low_bias=True,
+                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
+                    cwt_n_cycles=cwt_n_cycles)
 
                 assert_true(isinstance(freqs3, list))
                 assert_true(len(freqs3) == len(fmin))
                 for i in range(len(freqs3)):
-                    assert_true(np.all((freqs3[i] >= fmin[i])
-                                       & (freqs3[i] <= fmax[i])))
+                    assert_true(np.all((freqs3[i] >= fmin[i]) &
+                                       (freqs3[i] <= fmax[i])))
 
                 # average con2 "manually" and we get the same result
-                for i in range(len(freqs3)):
-                    freq_idx = np.searchsorted(freqs2, freqs3[i])
-                    con2_avg = np.mean(con2[:, freq_idx], axis=1)
-                    assert_array_almost_equal(con2_avg, con3[:, i])
+                if not isinstance(method, list):
+                    for i in range(len(freqs3)):
+                        freq_idx = np.searchsorted(freqs2, freqs3[i])
+                        con2_avg = np.mean(con2[:, freq_idx], axis=1)
+                        assert_array_almost_equal(con2_avg, con3[:, i])
+                else:
+                    for j in range(len(con2)):
+                        for i in range(len(freqs3)):
+                            freq_idx = np.searchsorted(freqs2, freqs3[i])
+                            con2_avg = np.mean(con2[j][:, freq_idx], axis=1)
+                            assert_array_almost_equal(con2_avg, con3[j][:, i])
+
+
+run_tests_if_main()
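
A minimal sketch of the band-averaging path these tests exercise, assuming the
same 0.10-era API (values illustrative)::

    import numpy as np
    from mne.connectivity import spectral_connectivity

    data = np.random.RandomState(0).randn(8, 3, 256)
    con, freqs, times, n, _ = spectral_connectivity(
        data, method='coh', sfreq=50., fmin=(5., 15.), fmax=(15., 30.),
        faverage=True)
    # one averaged value per band: con.shape[-1] == 2
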
diff --git a/mne/coreg.py b/mne/coreg.py
index 2461380..d3df150 100644
--- a/mne/coreg.py
+++ b/mne/coreg.py
@@ -8,22 +8,21 @@ from .externals.six.moves import configparser
 import fnmatch
 from glob import glob, iglob
 import os
+import stat
+import sys
 import re
 import shutil
 from warnings import warn
 
 import numpy as np
 from numpy import dot
-from scipy.optimize import leastsq
-from scipy.spatial.distance import cdist
-from scipy.linalg import norm
 
 from .io.meas_info import read_fiducials, write_fiducials
 from .label import read_label, Label
 from .source_space import (add_source_space_distances, read_source_spaces,
                            write_source_spaces)
-from .surface import (read_surface, write_surface, read_bem_surfaces,
-                      write_bem_surface)
+from .surface import read_surface, write_surface
+from .bem import read_bem_surfaces, write_bem_surfaces
 from .transforms import rotation, rotation3d, scaling, translation
 from .utils import get_config, get_subjects_dir, logger, pformat
 from functools import reduce
@@ -42,6 +41,19 @@ fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
 src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
 
 
+def _make_writable(fname):
+    os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128)  # write
+
+
+def _make_writable_recursive(path):
+    """Recursively set writable"""
+    if sys.platform.startswith('win'):
+        return  # can't safely set perms
+    for root, dirs, files in os.walk(path, topdown=False):
+        for f in dirs + files:
+            _make_writable(os.path.join(root, f))
+
+
 def create_default_subject(mne_root=None, fs_home=None, update=False,
                            subjects_dir=None):
     """Create an average brain subject for subjects without structural MRI
@@ -92,44 +104,41 @@ def create_default_subject(mne_root=None, fs_home=None, update=False,
     if fs_home is None:
         fs_home = get_config('FREESURFER_HOME', fs_home)
         if fs_home is None:
-            err = ("FREESURFER_HOME environment variable not found. Please "
-                   "specify the fs_home parameter in your call to "
-                   "create_default_subject().")
-            raise ValueError(err)
+            raise ValueError(
+                "FREESURFER_HOME environment variable not found. Please "
+                "specify the fs_home parameter in your call to "
+                "create_default_subject().")
     if mne_root is None:
         mne_root = get_config('MNE_ROOT', mne_root)
         if mne_root is None:
-            err = ("MNE_ROOT environment variable not found. Please "
-                   "specify the mne_root parameter in your call to "
-                   "create_default_subject().")
-            raise ValueError(err)
+            raise ValueError("MNE_ROOT environment variable not found. Please "
+                             "specify the mne_root parameter in your call to "
+                             "create_default_subject().")
 
     # make sure freesurfer files exist
     fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
     if not os.path.exists(fs_src):
-        err = ('fsaverage not found at %r. Is fs_home specified '
-               'correctly?' % fs_src)
-        raise IOError(err)
+        raise IOError('fsaverage not found at %r. Is fs_home specified '
+                      'correctly?' % fs_src)
     for name in ('label', 'mri', 'surf'):
         dirname = os.path.join(fs_src, name)
         if not os.path.isdir(dirname):
-            err = ("Freesurfer fsaverage seems to be incomplete: No directory "
-                   "named %s found in %s" % (name, fs_src))
-            raise IOError(err)
+            raise IOError("Freesurfer fsaverage seems to be incomplete: No "
+                          "directory named %s found in %s" % (name, fs_src))
 
     # make sure destination does not already exist
     dest = os.path.join(subjects_dir, 'fsaverage')
     if dest == fs_src:
-        err = ("Your subjects_dir points to the freesurfer subjects_dir (%r). "
-               "The default subject can not be created in the freesurfer "
-               "installation directory; please specify a different "
-               "subjects_dir." % subjects_dir)
-        raise IOError(err)
+        raise IOError(
+            "Your subjects_dir points to the freesurfer subjects_dir (%r). "
+            "The default subject can not be created in the freesurfer "
+            "installation directory; please specify a different "
+            "subjects_dir." % subjects_dir)
     elif (not update) and os.path.exists(dest):
-        err = ("Can not create fsaverage because %r already exists in "
-               "subjects_dir %r. Delete or rename the existing fsaverage "
-               "subject folder." % ('fsaverage', subjects_dir))
-        raise IOError(err)
+        raise IOError(
+            "Can not create fsaverage because %r already exists in "
+            "subjects_dir %r. Delete or rename the existing fsaverage "
+            "subject folder." % ('fsaverage', subjects_dir))
 
     # make sure mne files exist
     mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
@@ -138,14 +147,14 @@ def create_default_subject(mne_root=None, fs_home=None, update=False,
     for name in mne_files:
         fname = mne_fname % name
         if not os.path.isfile(fname):
-            err = ("MNE fsaverage incomplete: %s file not found at "
-                   "%s" % (name, fname))
-            raise IOError(err)
+            raise IOError("MNE fsaverage incomplete: %s file not found at "
+                          "%s" % (name, fname))
 
     # copy fsaverage from freesurfer
     logger.info("Copying fsaverage subject from freesurfer directory...")
     if (not update) or not os.path.exists(dest):
         shutil.copytree(fs_src, dest)
+        _make_writable_recursive(dest)
 
     # add files from mne
     dest_bem = os.path.join(dest, 'bem')
@@ -153,6 +162,7 @@ def create_default_subject(mne_root=None, fs_home=None, update=False,
         os.mkdir(dest_bem)
     logger.info("Copying auxiliary fsaverage files from mne directory...")
     dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
+    _make_writable_recursive(dest_bem)
     for name in mne_files:
         if not os.path.exists(dest_fname % name):
             shutil.copy(mne_fname % name, dest_bem)
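
A minimal usage sketch for ``create_default_subject`` as patched above (all
paths are hypothetical)::

    from mne.coreg import create_default_subject

    create_default_subject(mne_root='/opt/MNE', fs_home='/opt/freesurfer',
                           subjects_dir='/data/subjects')
    # copies freesurfer's fsaverage into subjects_dir and adds the MNE files
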
@@ -167,7 +177,7 @@ def _decimate_points(pts, res=10):
 
     Parameters
     ----------
-    pts : array, shape = (n_points, 3)
+    pts : array, shape (n_points, 3)
         The points making up the head shape.
     res : scalar
         The resolution of the voxel space (side length of each voxel).
@@ -177,6 +187,7 @@ def _decimate_points(pts, res=10):
     pts : array, shape = (n_points, 3)
         The decimated points.
     """
+    from scipy.spatial.distance import cdist
     pts = np.asarray(pts)
 
     # find the bin edges for the voxel space
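
The voxel-grid idea behind ``_decimate_points`` can be sketched as follows; a
simplified illustration that keeps one representative point per occupied
voxel, not the exact selection rule used here::

    import numpy as np

    def decimate_points_sketch(pts, res=10):
        """Keep one point per occupied voxel of side length res."""
        pts = np.asarray(pts, dtype=float)
        seen = {}
        for p in pts:
            key = tuple(np.floor(p / res).astype(int))  # voxel index
            seen.setdefault(key, p)                     # first point wins
        return np.array(list(seen.values()))
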
@@ -297,12 +308,12 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
         A single tuple containing the translation, rotation and scaling
         parameters in that order.
     """
+    from scipy.optimize import leastsq
     src_pts = np.atleast_2d(src_pts)
     tgt_pts = np.atleast_2d(tgt_pts)
     if src_pts.shape != tgt_pts.shape:
-        err = ("src_pts and tgt_pts must have same shape "
-               "(got {0}, {1})".format(src_pts.shape, tgt_pts.shape))
-        raise ValueError(err)
+        raise ValueError("src_pts and tgt_pts must have same shape (got "
+                         "{0}, {1})".format(src_pts.shape, tgt_pts.shape))
 
     rotate = bool(rotate)
     translate = bool(translate)
@@ -345,9 +356,9 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
         if x0 is None:
             x0 = (0, 0, 0, 0, 0, 0, 1)
     else:
-        err = ("The specified parameter combination is not implemented: "
-               "rotate=%r, translate=%r, scale=%r" % param_info)
-        raise NotImplementedError(err)
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate=%r, translate=%r, scale=%r" % param_info)
 
     x, _, _, _, _ = leastsq(error, x0, full_output=True)
 
@@ -369,62 +380,8 @@ def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
     elif out == 'trans':
         return trans
     else:
-        err = ("Invalid out parameter: %r. Needs to be 'params' or "
-               "'trans'." % out)
-        raise ValueError(err)
-
-
-def get_ras_to_neuromag_trans(nasion, lpa, rpa):
-    """Construct a transformation matrix to the MNE head coordinate system
-
-    Construct a transformation matrix from an arbitrary RAS coordinate system
-    to the MNE head coordinate system, in which the x axis passes through the
-    two preauricular points, and the y axis passes through the nasion and is
-    normal to the x axis. (see mne manual, pg. 97)
-
-    Parameters
-    ----------
-    nasion : array_like, shape = (3,)
-        Nasion point coordinate.
-    lpa : array_like, shape = (3,)
-        Left peri-auricular point coordinate.
-    rpa : array_like, shape = (3,)
-        Right peri-auricular point coordinate.
-
-    Returns
-    -------
-    trans : numpy.array, shape = (4, 4)
-        Transformation matrix to MNE head space.
-    """
-    # check input args
-    nasion = np.asarray(nasion)
-    lpa = np.asarray(lpa)
-    rpa = np.asarray(rpa)
-    for pt in (nasion, lpa, rpa):
-        if pt.ndim != 1 or len(pt) != 3:
-            err = ("Points have to be provided as one dimensional arrays of "
-                   "length 3.")
-            raise ValueError(err)
-
-    right = rpa - lpa
-    right_unit = right / norm(right)
-
-    origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
-
-    anterior = nasion - origin
-    anterior_unit = anterior / norm(anterior)
-
-    superior_unit = np.cross(right_unit, anterior_unit)
-
-    x, y, z = -origin
-    origin_trans = translation(x, y, z)
-
-    trans_l = np.vstack((right_unit, anterior_unit, superior_unit, [0, 0, 0]))
-    trans_r = np.reshape([0, 0, 0, 1], (4, 1))
-    rot_trans = np.hstack((trans_l, trans_r))
-
-    trans = np.dot(rot_trans, origin_trans)
-    return trans
+        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
+                         "'trans'." % out)
 
 
 def _point_cloud_error(src_pts, tgt_pts):
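
A round-trip sketch for ``fit_matched_points``, assuming the ``rotation`` and
``translation`` helpers imported at the top of this module (synthetic
points)::

    import numpy as np
    from mne.coreg import fit_matched_points
    from mne.transforms import rotation, translation

    src = np.random.RandomState(0).randn(10, 3)
    trans = np.dot(translation(.01, .02, .03), rotation(.1, .2, .3))
    tgt = np.dot(src, trans[:3, :3].T) + trans[:3, 3]
    est = fit_matched_points(src, tgt, out='trans')  # ~recovers trans
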
@@ -443,6 +400,7 @@ def _point_cloud_error(src_pts, tgt_pts):
         For each point in ``src_pts``, the distance to the closest point in
         ``tgt_pts``.
     """
+    from scipy.spatial.distance import cdist
     Y = cdist(src_pts, tgt_pts, 'euclidean')
     dist = Y.min(axis=1)
     return dist
@@ -513,6 +471,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
     the distance of each src_pt to the closest tgt_pt can be used as an
     estimate of the distance of src_pt to tgt_pts.
     """
+    from scipy.optimize import leastsq
     kwargs = {'epsfcn': 0.01}
     kwargs.update(leastsq_args)
 
@@ -539,6 +498,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
     param_info = (rotate, translate, scale)
     if param_info == (True, False, 0):
         x0 = x0 or (0, 0, 0)
+
         def error(x):
             rx, ry, rz = x
             trans = rotation3d(rx, ry, rz)
@@ -547,6 +507,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
             return err
     elif param_info == (True, False, 1):
         x0 = x0 or (0, 0, 0, 1)
+
         def error(x):
             rx, ry, rz, s = x
             trans = rotation3d(rx, ry, rz) * s
@@ -555,6 +516,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
             return err
     elif param_info == (True, False, 3):
         x0 = x0 or (0, 0, 0, 1, 1, 1)
+
         def error(x):
             rx, ry, rz, sx, sy, sz = x
             trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
@@ -563,6 +525,7 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
             return err
     elif param_info == (True, True, 0):
         x0 = x0 or (0, 0, 0, 0, 0, 0)
+
         def error(x):
             rx, ry, rz, tx, ty, tz = x
             trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
@@ -570,9 +533,9 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
             err = errfunc(est[:, :3], tgt_pts)
             return err
     else:
-        err = ("The specified parameter combination is not implemented: "
-               "rotate=%r, translate=%r, scale=%r" % param_info)
-        raise NotImplementedError(err)
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate=%r, translate=%r, scale=%r" % param_info)
 
     est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
     logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
@@ -583,9 +546,8 @@ def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
     elif out == 'trans':
         return _trans_from_params(param_info, est)
     else:
-        err = ("Invalid out parameter: %r. Needs to be 'params' or "
-               "'trans'." % out)
-        raise ValueError(err)
+        raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
+                         "'trans'." % out)
 
 
 def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
@@ -651,12 +613,11 @@ def _find_mri_paths(subject='fsaverage', subjects_dir=None):
     # surf/ files
     paths['surf'] = surf = []
     surf_fname = os.path.join(surf_dirname, '{name}')
-    surf_names = ('orig', 'orig_avg',
-                  'inflated', 'inflated_avg', 'inflated_pre',
-                  'pial', 'pial_avg',
-                  'smoothwm',
-                  'white', 'white_avg',
-                  'sphere', 'sphere.reg', 'sphere.reg.avg')
+    surf_names = ('inflated', 'sphere', 'sphere.reg', 'white')
+    if os.getenv('_MNE_FEW_SURFACES', '') != 'true':  # for testing
+        surf_names = surf_names + (
+            'orig', 'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
+            'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
     for name in surf_names:
         for hemi in ('lh.', 'rh.'):
             fname = pformat(surf_fname, name=hemi + name)
@@ -754,31 +715,6 @@ def _mri_subject_has_bem(subject, subjects_dir=None):
     return bool(len(fnames))
 
 
-def read_elp(fname):
-    """Read point coordinates from a text file
-
-    Parameters
-    ----------
-    fname : str
-        Absolute path to laser point file (*.txt).
-
-    Returns
-    -------
-    elp_points : array, [n_points x 3]
-        Point coordinates.
-    """
-    pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
-    with open(fname) as fid:
-        elp_points = pattern.findall(fid.read())
-    elp_points = np.array(elp_points, dtype=float)
-    if elp_points.shape[1] != 3:
-        err = ("File %r does not contain 3 columns as required; got shape "
-               "%s." % (fname, elp_points.shape))
-        raise ValueError(err)
-
-    return elp_points
-
-
 def read_mri_cfg(subject, subjects_dir=None):
     """Read information from the cfg file of a scaled MRI brain
 
@@ -798,9 +734,8 @@ def read_mri_cfg(subject, subjects_dir=None):
     fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
 
     if not os.path.exists(fname):
-        err = ("%r does not seem to be a scaled mri subject: %r does not "
-               "exist." % (subject, fname))
-        raise IOError(err)
+        raise IOError("%r does not seem to be a scaled mri subject: %r does "
+                      "not exist." % (subject, fname))
 
     logger.info("Reading MRI cfg file %s" % fname)
     config = configparser.RawConfigParser()
@@ -854,11 +789,10 @@ def _write_mri_config(fname, subject_from, subject_to, scale):
 
 
 def _scale_params(subject_to, subject_from, scale, subjects_dir):
-    subjects_dir = get_subjects_dir(subjects_dir, True)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     if (subject_from is None) != (scale is None):
-        err = ("Need to provide either both subject_from and scale "
-               "parameters, or neither.")
-        raise TypeError(err)
+        raise TypeError("Need to provide either both subject_from and scale "
+                        "parameters, or neither.")
 
     if subject_from is None:
         cfg = read_mri_cfg(subject_to, subjects_dir)
@@ -872,9 +806,8 @@ def _scale_params(subject_to, subject_from, scale, subjects_dir):
         elif scale.shape == (3,):
             n_params = 3
         else:
-            err = ("Invalid shape for scale parameer. Need scalar or array of "
-                   "length 3. Got %s." % str(scale))
-            raise ValueError(err)
+            raise ValueError("Invalid shape for scale parameer. Need scalar "
+                             "or array of length 3. Got %s." % str(scale))
 
     return subjects_dir, subject_from, n_params, scale
 
@@ -914,11 +847,11 @@ def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
 
     surfs = read_bem_surfaces(src)
     if len(surfs) != 1:
-        err = ("BEM file with more than one surface: %r" % src)
-        raise NotImplementedError(err)
+        raise NotImplementedError("BEM file with more than one surface: %r"
+                                  % src)
     surf0 = surfs[0]
     surf0['rr'] = surf0['rr'] * scale
-    write_bem_surface(dst, surf0)
+    write_bem_surfaces(dst, surf0)
 
 
 def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
@@ -932,7 +865,7 @@ def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
     pattern : str | None
         Pattern for finding the labels relative to the label directory in the
         MRI subject directory (e.g., "lh.BA3a.label" will scale
-        "fsaverage/label/lh.BA3a.label"; "aparc/*.label" will find all labels
+        "fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
         in the "fsaverage/label/aparc" directory). With None, scale all labels.
     overwrite : bool
         Overwrite any label file that already exists for subject_to (otherwise
@@ -1014,9 +947,8 @@ def scale_mri(subject_from, subject_to, scale, overwrite=False,
         if overwrite:
             shutil.rmtree(dest)
         else:
-            err = ("Subject directory for %s already exists: "
-                   "%r" % (subject_to, dest))
-            raise IOError(err)
+            raise IOError("Subject directory for %s already exists: %r"
+                          % (subject_to, dest))
 
     for dirname in paths['dirs']:
         dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
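
A minimal usage sketch for the MRI scaling entry point patched here (subject
names and scale factor are hypothetical)::

    from mne.coreg import scale_mri

    scale_mri(subject_from='fsaverage', subject_to='sub01', scale=0.95,
              subjects_dir='/data/subjects')
    # creates subjects_dir/sub01 as a uniformly scaled copy of fsaverage
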
@@ -1121,8 +1053,8 @@ def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
     elif n_params == 3:
         norm_scale = 1. / scale
     else:
-        err = ("Invalid n_params entry in MRI cfg file: %s" % str(n_params))
-        raise RuntimeError(err)
+        raise RuntimeError("Invalid n_params entry in MRI cfg file: %s"
+                           % str(n_params))
 
     # read and scale the source space [in m]
     sss = read_source_spaces(src)
diff --git a/mne/cov.py b/mne/cov.py
index 2ebbd67..5fb6f17 100644
--- a/mne/cov.py
+++ b/mne/cov.py
@@ -1,32 +1,45 @@
 # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
 #          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#          Denis A. Engemann <denis.engemann@gmail.com>
 #
 # License: BSD (3-clause)
 
 import copy as cp
 import os
-from math import floor, ceil
+from math import floor, ceil, log
+import itertools as itt
 import warnings
 
+from copy import deepcopy
+
+import six
+from distutils.version import LooseVersion
+
 import numpy as np
 from scipy import linalg
 
 from .io.write import start_file, end_file
-from .io.proj import (make_projector, proj_equal, activate_proj,
+from .io.proj import (make_projector, _proj_equal, activate_proj,
                       _has_eeg_average_ref_proj)
 from .io import fiff_open
 from .io.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
-                      pick_channels)
+                      pick_channels, pick_info, _picks_by_type)
+
 from .io.constants import FIFF
 from .io.meas_info import read_bad_channels
 from .io.proj import _read_proj, _write_proj
 from .io.tag import find_tag
 from .io.tree import dir_tree_find
 from .io.write import (start_block, end_block, write_int, write_name_list,
-                       write_double, write_float_matrix)
+                       write_double, write_float_matrix, write_string)
+from .defaults import _handle_default
 from .epochs import _is_good
-from .utils import check_fname, logger, verbose
+from .utils import (check_fname, logger, verbose, estimate_rank,
+                    _compute_row_norms, check_version, _time_mask)
+from .utils import deprecated
+
 from .externals.six.moves import zip
+from .externals.six import string_types
 
 
 def _check_covs_algebra(cov1, cov2):
@@ -40,13 +53,44 @@ def _check_covs_algebra(cov1, cov2):
                          'SSP projections.')
 
 
+def _get_tslice(epochs, tmin, tmax):
+    """get the slice."""
+    tstart, tend = None, None
+    mask = _time_mask(epochs.times, tmin, tmax)
+    tstart = np.where(mask)[0][0] if tmin is not None else None
+    tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
+    tslice = slice(tstart, tend, None)
+    return tslice
+
+
 class Covariance(dict):
-    """Noise covariance matrix
+
+    """Noise covariance matrix.
+
+    .. warning:: This class should not be instantiated directly, but
+                 instead should be created using a covariance reading or
+                 computation function.
 
     Parameters
     ----------
-    fname : string
-        The name of the raw file.
+    data : array-like
+        The data.
+    names : list of str
+        Channel names.
+    bads : list of str
+        Bad channels.
+    projs : list
+        Projection vectors.
+    nfree : int
+        Degrees of freedom.
+    eig : array-like | None
+        Eigenvalues.
+    eigvec : array-like | None
+        Eigenvectors.
+    method : str | None
+        The method used to compute the covariance.
+    loglik : float
+        The log likelihood.
 
     Attributes
     ----------
@@ -56,30 +100,50 @@ class Covariance(dict):
         List of channels' names.
     nfree : int
         Number of degrees of freedom i.e. number of time points used.
+
+    See Also
+    --------
+    compute_covariance
+    compute_raw_covariance
+    make_ad_hoc_cov
+    read_cov
     """
-    def __init__(self, fname):
-        if fname is None:
-            return
 
-        # Reading
-        fid, tree, _ = fiff_open(fname)
-        self.update(_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV))
-        fid.close()
+    def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
+                 method=None, loglik=None):
+        """Init of covariance."""
+        diag = data.ndim == 1
+        self.update(data=data, dim=len(data), names=names, bads=bads,
+                    nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
+                    projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
+        if method is not None:
+            self['method'] = method
+        if loglik is not None:
+            self['loglik'] = loglik
 
     @property
     def data(self):
+        """Numpy array of Noise covariance matrix."""
         return self['data']
 
     @property
     def ch_names(self):
+        """Channel names."""
         return self['names']
 
     @property
     def nfree(self):
+        """Number of degrees of freedom."""
         return self['nfree']
 
     def save(self, fname):
-        """save covariance matrix in a FIF file"""
+        """Save covariance matrix in a FIF file.
+
+        Parameters
+        ----------
+        fname : str
+            Output filename.
+        """
         check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
 
         fid = start_file(fname)
@@ -87,13 +151,24 @@ class Covariance(dict):
         try:
             _write_cov(fid, self)
         except Exception as inst:
+            fid.close()
             os.remove(fname)
             raise inst
 
         end_file(fid)
 
+    def copy(self):
+        """Copy the Covariance object
+
+        Returns
+        -------
+        cov : instance of Covariance
+            The copied object.
+        """
+        return deepcopy(self)
+
     def as_diag(self, copy=True):
-        """Set covariance to be processed as being diagonal
+        """Set covariance to be processed as being diagonal.
 
         Parameters
         ----------
@@ -124,12 +199,16 @@ class Covariance(dict):
         return cov
 
     def __repr__(self):
-        s = "size : %s x %s" % self.data.shape
+        if self.data.ndim == 2:
+            s = 'size : %s x %s' % self.data.shape
+        else:  # ndim == 1
+            s = 'diagonal : %s' % self.data.size
+        s += ", n_samples : %s" % self.nfree
         s += ", data : %s" % self.data
         return "<Covariance  |  %s>" % s
 
     def __add__(self, cov):
-        """Add Covariance taking into account number of degrees of freedom"""
+        """Add Covariance taking into account number of degrees of freedom."""
         _check_covs_algebra(self, cov)
         this_cov = cp.deepcopy(cov)
         this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
@@ -142,7 +221,7 @@ class Covariance(dict):
         return this_cov
 
     def __iadd__(self, cov):
-        """Add Covariance taking into account number of degrees of freedom"""
+        """Add Covariance taking into account number of degrees of freedom."""
         _check_covs_algebra(self, cov)
         self['data'][:] = (((self['data'] * self['nfree']) +
                             (cov['data'] * cov['nfree'])) /
@@ -153,11 +232,46 @@ class Covariance(dict):
 
         return self
 
+    @verbose
+    def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
+             show=True, verbose=None):
+        """Plot Covariance data.
+
+        Parameters
+        ----------
+        info : dict
+            Measurement info.
+        exclude : list of string | str
+            List of channels to exclude. If empty do not exclude any channel.
+            If 'bads', exclude info['bads'].
+        colorbar : bool
+            Show colorbar or not.
+        proj : bool
+            Apply projections or not.
+        show_svd : bool
+            Plot also singular values of the noise covariance for each sensor
+            type. We show square roots, i.e., standard deviations.
+        show : bool
+            Call pyplot.show() at the end or not.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig_cov : instance of matplotlib.pyplot.Figure
+            The covariance plot.
+        fig_svd : instance of matplotlib.pyplot.Figure | None
+            The SVD spectra plot of the covariance.
+        """
+        from .viz.misc import plot_cov
+        return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
+
 
 ###############################################################################
 # IO
 
-def read_cov(fname):
+@verbose
+def read_cov(fname, verbose=None):
     """Read a noise covariance from a FIF file.
 
     Parameters
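
A minimal usage sketch for the reworked ``read_cov`` (the file name is
hypothetical; it must end in -cov.fif or -cov.fif.gz)::

    import mne

    cov = mne.read_cov('sample-cov.fif')
    print(cov)  # e.g. <Covariance  |  size : 366 x 366, ...>
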
@@ -165,22 +279,69 @@ def read_cov(fname):
     fname : string
         The name of file containing the covariance matrix. It should end with
         -cov.fif or -cov.fif.gz.
+    verbose : bool, str, int, or None (default None)
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     cov : Covariance
         The noise covariance matrix.
+
+    See Also
+    --------
+    write_cov, compute_covariance, compute_raw_covariance
     """
     check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
-
-    return Covariance(fname)
+    f, tree = fiff_open(fname)[:2]
+    with f as fid:
+        return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
+                                      limited=True))
 
 
 ###############################################################################
 # Estimate from data
 
+@verbose
+def make_ad_hoc_cov(info, verbose=None):
+    """Create an ad hoc noise covariance.
+
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        Measurement info.
+    verbose : bool, str, int, or None (default None)
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    cov : instance of Covariance
+        The ad hoc diagonal noise covariance for the M/EEG data channels.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=[]))
+    info._check_consistency()
+
+    # Standard deviations to be used
+    grad_std = 5e-13
+    mag_std = 20e-15
+    eeg_std = 0.2e-6
+    logger.info('Using standard noise values '
+                '(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
+                % (1e13 * grad_std, 1e15 * mag_std, 1e6 * eeg_std))
+
+    data = np.zeros(len(info['ch_names']))
+    for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
+                             (grad_std, mag_std, eeg_std)):
+        data[pick_types(info, meg=meg, eeg=eeg)] = val * val
+    return Covariance(data, info['ch_names'], info['bads'], info['projs'],
+                      nfree=0)
+
+
 def _check_n_samples(n_samples, n_chan):
-    """Check to see if there are enough samples for reliable cov calc"""
+    """Check to see if there are enough samples for reliable cov calc."""
     n_samples_min = 10 * (n_chan + 1) // 2
     if n_samples <= 0:
         raise ValueError('No samples found to compute the covariance matrix')
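
A minimal usage sketch for the new ``make_ad_hoc_cov``, assuming the top-level
export and the ``create_info`` helper available in this era (channel names
hypothetical)::

    import mne

    info = mne.create_info(['EEG 001', 'EEG 002', 'EEG 003'], sfreq=1000.,
                           ch_types='eeg')
    cov = mne.make_ad_hoc_cov(info)  # diagonal, standard EEG variances
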
@@ -191,11 +352,21 @@ def _check_n_samples(n_samples, n_chan):
         logger.warning(text)
 
 
+ at deprecated('"compute_raw_data_covariance" is deprecated and will be '
+            'removed in MNE-0.11. Please use compute_raw_covariance instead')
 @verbose
 def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
                                 reject=None, flat=None, picks=None,
                                 verbose=None):
-    """Estimate noise covariance matrix from a continuous segment of raw data
+    return compute_raw_covariance(raw, tmin, tmax, tstep,
+                                  reject, flat, picks, verbose)
+
+
+ at verbose
+def compute_raw_covariance(raw, tmin=None, tmax=None, tstep=0.2,
+                           reject=None, flat=None, picks=None,
+                           verbose=None):
+    """Estimate noise covariance matrix from a continuous segment of raw data.
 
     It is typically useful to estimate a noise covariance
     from empty room data or time intervals before starting
@@ -208,14 +379,14 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
     ----------
     raw : instance of Raw
         Raw data
-    tmin : float
+    tmin : float | None (default None)
         Beginning of time interval in seconds
-    tmax : float
+    tmax : float | None (default None)
         End of time interval in seconds
-    tstep : float
+    tstep : float (default 0.2)
         Length of data chunks for artefact rejection in seconds.
-    reject : dict
-        Rejection parameters based on peak to peak amplitude.
+    reject : dict | None (default None)
+        Rejection parameters based on peak-to-peak amplitude.
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
         If reject is None then no rejection is done. Example::
 
@@ -225,20 +396,25 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
                           eog=250e-6 # uV (EOG channels)
                           )
 
-    flat : dict
-        Rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+    flat : dict | None (default None)
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
         If flat is None then no rejection is done.
-    picks : array-like of int
+    picks : array-like of int | None (default None)
         Indices of channels to include (if None, all channels
         except bad channels are used).
-    verbose : bool, str, int, or None
+    verbose : bool | str | int | None (default None)
         If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     cov : instance of Covariance
         Noise covariance matrix.
+
+    See Also
+    --------
+    compute_covariance : Estimate noise covariance matrix from epochs
     """
     sfreq = raw.info['sfreq']
 
@@ -259,10 +435,7 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
     n_samples = 0
     mu = 0
 
-    info = cp.copy(raw.info)
-    info['chs'] = [info['chs'][k] for k in picks]
-    info['ch_names'] = [info['ch_names'][k] for k in picks]
-    info['nchan'] = len(picks)
+    info = pick_info(raw.info, picks)
     idx_by_type = channel_indices_by_type(info)
 
     # Read data in chunks
@@ -286,27 +459,19 @@ def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
     logger.info("Number of samples used : %d" % n_samples)
     logger.info('[done]')
 
-    cov = Covariance(None)
-
     ch_names = [raw.info['ch_names'][k] for k in picks]
+    bads = [b for b in raw.info['bads'] if b in ch_names]
+    projs = cp.deepcopy(raw.info['projs'])
     # XXX : do not compute eig and eigvec now (think it's better...)
-    eig = None
-    eigvec = None
-
-    #   Store structure for fif
-    cov.update(kind=FIFF.FIFFV_MNE_NOISE_COV, diag=False, dim=len(data),
-               names=ch_names, data=data,
-               projs=cp.deepcopy(raw.info['projs']),
-               bads=raw.info['bads'], nfree=n_samples, eig=eig,
-               eigvec=eigvec)
-
-    return cov
+    return Covariance(data, ch_names, bads, projs, nfree=n_samples)
 
 
 @verbose
 def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
-                       projs=None, verbose=None):
-    """Estimate noise covariance matrix from epochs
+                       projs=None, method='empirical', method_params=None,
+                       cv=3, scalings=None, n_jobs=1, return_estimators=False,
+                       verbose=None):
+    """Estimate noise covariance matrix from epochs.
 
     The noise covariance is typically estimated on pre-stim periods
     when the stim onset is defined from events.
@@ -326,37 +491,157 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
           merge_events(). However, the resulting covariance matrix
           will only be correct if keep_sample_mean is True.
 
+    Note: The covariance can be unstable if the number of samples is too
+          small. In that case it is common to regularize the covariance
+          estimate. The ``method`` parameter of this function allows the
+          covariance to be regularized in an automated way, and also allows
+          selecting between alternative estimation algorithms which
+          themselves achieve regularization. Details are described in [1].
+
     Parameters
     ----------
     epochs : instance of Epochs, or a list of Epochs objects
-        The epochs
-    keep_sample_mean : bool
+        The epochs.
+    keep_sample_mean : bool (default True)
         If False, the average response over epochs is computed for
         each event type and subtracted during the covariance
         computation. This is useful if the evoked response from a
         previous stimulus extends into the baseline period of the next.
-    tmin : float | None
+        Note. This option is only implemented for method='empirical'.
+    tmin : float | None (default None)
         Start time for baseline. If None start at first sample.
-    tmax : float | None
+    tmax : float | None (default None)
         End time for baseline. If None end at last sample.
-    projs : list of Projection | None
+    projs : list of Projection | None (default None)
         List of projectors to use in covariance calculation, or None
         to indicate that the projectors from the epochs should be
         inherited. If None, then projectors from all epochs must match.
-    verbose : bool, str, int, or None
+    method : str | list | None (default 'empirical')
+        The method used for covariance estimation. If 'empirical' (default),
+        the sample covariance will be computed. A list can be passed to run a
+        set of the different methods.
+        If 'auto' or a list of methods, the best estimator will be determined
+        based on log-likelihood and cross-validation on unseen data as
+        described in ref. [1]. Valid methods are: 'empirical', the empirical
+        or sample covariance; 'diagonal_fixed', a diagonal regularization as
+        in mne.cov.regularize (see MNE manual); 'ledoit_wolf', the
+        Ledoit-Wolf estimator (see [2]); 'shrunk', like 'ledoit_wolf' but
+        with cross-validation for the optimal shrinkage parameter (see the
+        scikit-learn documentation on covariance estimation); 'pca',
+        probabilistic PCA with low rank (see [3]); and 'factor_analysis',
+        factor analysis with low rank (see [4]). If 'auto', expands to::
+
+             ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
+
+        Note. 'ledoit_wolf' and 'pca' are similar to 'shrunk' and
+        'factor_analysis', respectively. They are not included to avoid
+        redundancy. In most cases 'shrunk' and 'factor_analysis' represent
+        more appropriate default choices.
+
+        .. versionadded:: 0.9.0
+
+    method_params : dict | None (default None)
+        Additional parameters to the estimation procedure. Only considered if
+        method is not None. Keys must correspond to the value(s) of `method`.
+        If None (default), expands to::
+
+            'empirical': {'store_precision': False, 'assume_centered': True},
+            'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
+                               'store_precision': False,
+                               'assume_centered': True},
+            'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
+            'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
+                       'store_precision': False, 'assume_centered': True},
+            'pca': {'iter_n_components': None},
+            'factor_analysis': {'iter_n_components': None}
+
+    cv : int | sklearn cross_validation object (default 3)
+        The cross-validation method. The default of 3 internally
+        triggers a 3-fold shuffle split.
+    scalings : dict | None (default None)
+        Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
+        These defaults rescale the different channel types to
+        approximately the same order of magnitude.
+    n_jobs : int (default 1)
+        Number of jobs to run in parallel.
+    return_estimators : bool (default False)
+        Whether to return all estimators or only the best. Only considered
+        if method equals 'auto' or is a list of str.
+    verbose : bool | str | int | None (default None)
         If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
-    cov : instance of Covariance
-        The computed covariance.
+    cov : instance of Covariance | list
+        The computed covariance. If method equals 'auto' or is a list of str
+        and return_estimators equals True, a list of covariance estimators is
+        returned (sorted by log-likelihood, from high to low, i.e. from best
+        to worst).
+
+    See Also
+    --------
+    compute_raw_covariance : Estimate noise covariance from raw data
+
+    References
+    ----------
+    [1] Engemann D. and Gramfort A. (2015) Automated model selection in
+        covariance estimation and spatial whitening of MEG and EEG signals,
+        vol. 108, 328-342, NeuroImage.
+    [2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
+        large-dimensional covariance matrices. Journal of Multivariate
+        Analysis 88 (2), 365 - 411.
+    [3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
+        component analysis. Journal of the Royal Statistical Society: Series
+        B (Statistical Methodology) 61 (3), 611 - 622.
+    [4] Barber, D., (2012). Bayesian reasoning and machine learning.
+        Cambridge University Press., Algorithm 21.1
     """
+    accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
+                        'shrunk', 'pca', 'factor_analysis',)
+    msg = ('Invalid method ({method}). Accepted values (individually or '
+           'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
+
+    # scale to natural unit for best stability with MEG/EEG
+    if isinstance(scalings, dict):
+        for k, v in scalings.items():
+            if k not in ('mag', 'grad', 'eeg'):
+                raise ValueError('The keys in `scalings` must be "mag", '
+                                 '"grad" or "eeg". You gave: %s' % k)
+    scalings = _handle_default('scalings', scalings)
+
+    _method_params = {
+        'empirical': {'store_precision': False, 'assume_centered': True},
+        'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
+                           'store_precision': False, 'assume_centered': True},
+        'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
+        'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
+                   'store_precision': False, 'assume_centered': True},
+        'pca': {'iter_n_components': None},
+        'factor_analysis': {'iter_n_components': None}
+    }
+    if isinstance(method_params, dict):
+        for key, values in method_params.items():
+            if key not in _method_params:
+                raise ValueError('key (%s) must be "%s"' %
+                                 (key, '" or "'.join(_method_params)))
+
+            _method_params[key].update(method_params[key])
+
+    # For multi-condition support, `epochs` may refer to a list of Epochs
+    # objects
+
+    def _unpack_epochs(epochs):
+        if len(epochs.event_id) > 1:
+            epochs = [epochs[k] for k in epochs.event_id]
+        else:
+            epochs = [epochs]
+        return epochs
 
     if not isinstance(epochs, list):
         epochs = _unpack_epochs(epochs)
     else:
-        epochs = [ep for li in [_unpack_epochs(epoch) for epoch in epochs]
-                  for ep in li]
+        epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
 
     # check for baseline correction
     for epochs_t in epochs:
@@ -364,6 +649,8 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
             warnings.warn('Epochs are not baseline corrected, covariance '
                           'matrix may be inaccurate')
 
+    for epoch in epochs:
+        epoch.info._check_consistency()
     bads = epochs[0].info['bads']
     if projs is None:
         projs = cp.deepcopy(epochs[0].info['projs'])
@@ -372,7 +659,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
             if epochs_t.proj != epochs[0].proj:
                 raise ValueError('Epochs must agree on the use of projections')
             for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
-                if not proj_equal(proj_a, proj_b):
+                if not _proj_equal(proj_a, proj_b):
                     raise ValueError('Epochs must have same projectors')
     else:
         projs = cp.deepcopy(projs)
@@ -384,68 +671,443 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
             raise ValueError('Epochs must have same bad channels')
         if epochs_t.ch_names != ch_names:
             raise ValueError('Epochs must have same channel names')
-
-    n_epoch_types = len(epochs)
-    data = 0.0
-    data_mean = list(np.zeros(n_epoch_types))
-    n_samples = np.zeros(n_epoch_types, dtype=np.int)
-    n_epochs = np.zeros(n_epoch_types, dtype=np.int)
-
-    picks_meeg = pick_types(epochs[0].info, meg=True, eeg=True, eog=False,
-                            ref_meg=False, exclude=[])
+    picks_list = _picks_by_type(epochs[0].info)
+    picks_meeg = np.concatenate([b for _, b in picks_list])
+    picks_meeg = np.sort(picks_meeg)
     ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
+    info = epochs[0].info  # we will overwrite 'epochs'
+
+    if method == 'auto':
+        method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
+
+    if not isinstance(method, (list, tuple)):
+        method = [method]
+
+    ok_sklearn = check_version('sklearn', '0.15') is True
+    if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
+        raise ValueError('scikit-learn >= 0.15 is required to use a '
+                         '`method` other than `empirical`')
+
+    if keep_sample_mean is False:
+        if len(method) != 1 or 'empirical' not in method:
+            raise ValueError('`keep_sample_mean=False` is only supported '
+                             'with `method="empirical"`')
+        for p, v in _method_params.items():
+            if v.get('assume_centered', None) is False:
+                raise ValueError('`assume_centered` must be True'
+                                 ' if `keep_sample_mean` is False')
+        # prepare mean covs
+        n_epoch_types = len(epochs)
+        data_mean = list(np.zeros(n_epoch_types))
+        n_samples = np.zeros(n_epoch_types, dtype=np.int)
+        n_epochs = np.zeros(n_epoch_types, dtype=np.int)
+
+        for ii, epochs_t in enumerate(epochs):
+
+            tslice = _get_tslice(epochs_t, tmin, tmax)
+            for e in epochs_t:
+                e = e[picks_meeg, tslice]
+                if not keep_sample_mean:
+                    data_mean[ii] += e
+                n_samples[ii] += e.shape[1]
+                n_epochs[ii] += 1
 
-    for i, epochs_t in enumerate(epochs):
-
-        tstart, tend = None, None
-        if tmin is not None:
-            tstart = np.where(epochs_t.times >= tmin)[0][0]
-        if tmax is not None:
-            tend = np.where(epochs_t.times <= tmax)[0][-1] + 1
-        tslice = slice(tstart, tend, None)
-
-        for e in epochs_t:
-            e = e[picks_meeg][:, tslice]
-            if not keep_sample_mean:
-                data_mean[i] += e
-            data += np.dot(e, e.T)
-            n_samples[i] += e.shape[1]
-            n_epochs[i] += 1
-
-    n_samples_tot = int(np.sum(n_samples))
-
-    _check_n_samples(n_samples_tot, len(picks_meeg))
-
-    if keep_sample_mean:
-        data /= n_samples_tot
-    else:
         n_samples_epoch = n_samples // n_epochs
         norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
-        for i, mean in enumerate(data_mean):
-            data -= 1.0 / n_epochs[i] * np.dot(mean, mean.T)
-        data /= norm_const
+        data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
+                     in zip(n_epochs, data_mean)]
 
-    cov = Covariance(None)
+    if not all(k in accepted_methods for k in method):
+        raise ValueError(msg.format(method=method))
 
-    # XXX : do not compute eig and eigvec now (think it's better...)
-    eig = None
-    eigvec = None
+    info = pick_info(info, picks_meeg)
+    tslice = _get_tslice(epochs[0], tmin, tmax)
+    epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
+    picks_meeg = np.arange(len(picks_meeg))
+    picks_list = _picks_by_type(info)
 
-    cov.update(kind=1, diag=False, dim=len(data), names=ch_names,
-               data=data, projs=projs, bads=epochs[0].info['bads'],
-               nfree=n_samples_tot, eig=eig, eigvec=eigvec)
+    if len(epochs) > 1:
+        epochs = np.concatenate(epochs, 0)
+    else:
+        epochs = epochs[0]
 
-    logger.info("Number of samples used : %d" % n_samples_tot)
-    logger.info('[done]')
+    epochs = np.hstack(epochs)
+    n_samples_tot = epochs.shape[-1]
+    _check_n_samples(n_samples_tot, len(picks_meeg))
 
-    return cov
+    epochs = epochs.T  # sklearn | C-order
+    if ok_sklearn:
+        cov_data = _compute_covariance_auto(epochs, method=method,
+                                            method_params=_method_params,
+                                            info=info,
+                                            verbose=verbose,
+                                            cv=cv,
+                                            n_jobs=n_jobs,
+                                            # XXX expose later
+                                            stop_early=True,  # if needed.
+                                            picks_list=picks_list,
+                                            scalings=scalings)
+    else:
+        if _method_params['empirical']['assume_centered'] is True:
+            cov = epochs.T.dot(epochs) / n_samples_tot
+        else:
+            cov = np.cov(epochs.T, bias=1)
+        cov_data = {'empirical': {'data': cov}}
+
+    if keep_sample_mean is False:
+        cov = cov_data['empirical']['data']
+        # undo scaling
+        cov *= n_samples_tot
+        # ... apply pre-computed class-wise normalization
+        for mean_cov in data_mean:
+            cov -= mean_cov
+        cov /= norm_const
+
+    covs = list()
+    for this_method, data in cov_data.items():
+        cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
+                         nfree=n_samples_tot)
+        logger.info('Number of samples used : %d' % n_samples_tot)
+        logger.info('[done]')
+
+        # add extra info
+        cov.update(method=this_method, **data)
+        covs.append(cov)
+
+    if ok_sklearn:
+        msg = ['log-likelihood on unseen data (descending order):']
+        logliks = [(c['method'], c['loglik']) for c in covs]
+        logliks.sort(reverse=True, key=lambda c: c[1])
+        for k, v in logliks:
+            msg.append('%s: %0.3f' % (k, v))
+        logger.info('\n   '.join(msg))
+
+    if ok_sklearn and not return_estimators:
+        keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
+        out = covs[np.argmax(scores)]
+        logger.info('selecting best estimator: {0}'.format(out['method']))
+    elif ok_sklearn:
+        out = covs
+        out.sort(key=lambda c: c['loglik'], reverse=True)
+    else:
+        out = covs[0]
+
+    return out
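# --- Editorial usage sketch, not part of the upstream diff. Assumes
# `epochs` is a baseline-corrected mne.Epochs instance. With
# method='auto' the best estimator is picked by cross-validated
# log-likelihood, as described in the docstring above. ---
noise_cov = compute_covariance(epochs, tmin=None, tmax=0, method='auto')
all_covs = compute_covariance(epochs, method=['shrunk', 'empirical'],
                              return_estimators=True)  # sorted best first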
+
+
+def _compute_covariance_auto(data, method, info, method_params, cv,
+                             scalings, n_jobs, stop_early, picks_list,
+                             verbose):
+    """docstring for _compute_covariance_auto."""
+    from sklearn.grid_search import GridSearchCV
+    from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
+                                    EmpiricalCovariance)
+
+    # rescale to improve numerical stability
+    _apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
+    estimator_cov_info = list()
+    msg = 'Estimating covariance using %s'
+    _RegCovariance, _ShrunkCovariance = _get_covariance_classes()
+    for this_method in method:
+        data_ = data.copy()
+        name = this_method.__name__ if callable(this_method) else this_method
+        logger.info(msg % name.upper())
+
+        if this_method == 'empirical':
+            est = EmpiricalCovariance(**method_params[this_method])
+            est.fit(data_)
+            _info = None
+            estimator_cov_info.append((est, est.covariance_, _info))
+
+        elif this_method == 'diagonal_fixed':
+            est = _RegCovariance(info=info, **method_params[this_method])
+            est.fit(data_)
+            _info = None
+            estimator_cov_info.append((est, est.covariance_, _info))
+
+        elif this_method == 'ledoit_wolf':
+            shrinkages = []
+            lw = LedoitWolf(**method_params[this_method])
+
+            for ch_type, picks in picks_list:
+                lw.fit(data_[:, picks])
+                shrinkages.append((
+                    ch_type,
+                    lw.shrinkage_,
+                    picks
+                ))
+            sc = _ShrunkCovariance(shrinkage=shrinkages,
+                                   **method_params[this_method])
+            sc.fit(data_)
+            _info = None
+            estimator_cov_info.append((sc, sc.covariance_, _info))
+
+        elif this_method == 'shrunk':
+            shrinkage = method_params[this_method].pop('shrinkage')
+            tuned_parameters = [{'shrinkage': shrinkage}]
+            shrinkages = []
+            gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
+                              tuned_parameters, cv=cv)
+            for ch_type, picks in picks_list:
+                gs.fit(data_[:, picks])
+                shrinkages.append((
+                    ch_type,
+                    gs.best_estimator_.shrinkage,
+                    picks
+                ))
+            sc = _ShrunkCovariance(shrinkage=shrinkages,
+                                   **method_params[this_method])
+            sc.fit(data_)
+            _info = None
+            estimator_cov_info.append((sc, sc.covariance_, _info))
+
+        elif this_method == 'pca':
+            mp = method_params[this_method]
+            pca, _info = _auto_low_rank_model(data_, this_method,
+                                              n_jobs=n_jobs,
+                                              method_params=mp, cv=cv,
+                                              stop_early=stop_early)
+            pca.fit(data_)
+            estimator_cov_info.append((pca, pca.get_covariance(), _info))
+
+        elif this_method == 'factor_analysis':
+            mp = method_params[this_method]
+            fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
+                                             method_params=mp, cv=cv,
+                                             stop_early=stop_early)
+            fa.fit(data_)
+            estimator_cov_info.append((fa, fa.get_covariance(), _info))
+        else:
+            raise ValueError('Unknown covariance estimation method: %s'
+                             % this_method)
+        logger.info('Done.')
+
+    logger.info('Using cross-validation to select the best estimator.')
+    estimators, _, _ = zip(*estimator_cov_info)
+    logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])
+
+    # undo scaling
+    for c in estimator_cov_info:
+        _undo_scaling_cov(c[1], picks_list, scalings)
+
+    out = dict()
+    estimators, covs, runtime_infos = zip(*estimator_cov_info)
+    cov_methods = [c.__name__ if callable(c) else c for c in method]
+    runtime_infos, covs = list(runtime_infos), list(covs)
+    my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
+    for this_method, runtime_info, loglik, data, est in my_zip:
+        out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
+        if runtime_info is not None:
+            out[this_method].update(runtime_info)
+
+    return out
+
+
+def _logdet(A):
+    """Compute the log det of a symmetric matrix."""
+    vals = linalg.eigh(A)[0]
+    vals = np.abs(vals)  # avoid negative values (numerical errors)
+    return np.sum(np.log(vals))
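# --- Editorial sanity check, not part of the upstream diff: for a
# symmetric positive definite matrix the eigenvalue-based log-det
# above agrees with numpy's slogdet. ---
_A = np.array([[2.0, 0.3], [0.3, 1.0]])
assert np.allclose(_logdet(_A), np.linalg.slogdet(_A)[1])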
+
+
+def _gaussian_loglik_scorer(est, X, y=None):
+    """Compute the Gaussian log likelihood of X under the model in est."""
+    # mean log-likelihood of the samples in X under the fitted model
+    precision = est.get_precision()
+    n_features = X.shape[1]
+    log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
+    log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
+    out = np.mean(log_like)
+    return out
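# --- Editorial sanity check, not part of the upstream diff: the scorer
# above equals the mean log-density of the rows of X under a zero-mean
# Gaussian with the estimator's covariance. Assumes scipy and
# scikit-learn are available. ---
from scipy import stats
from sklearn.covariance import EmpiricalCovariance
_X = np.random.RandomState(0).randn(100, 3)
_est = EmpiricalCovariance(assume_centered=True).fit(_X)
_ref = stats.multivariate_normal(np.zeros(3), _est.covariance_).logpdf(_X)
assert np.isclose(_gaussian_loglik_scorer(_est, _X), _ref.mean())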
+
+
+def _cross_val(data, est, cv, n_jobs):
+    """Helper to compute cross validation."""
+    from sklearn.cross_validation import cross_val_score
+    return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
+                                   scoring=_gaussian_loglik_scorer))
+
+
+def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
+                         stop_early=True, verbose=None):
+    """compute latent variable models."""
+    method_params = cp.deepcopy(method_params)
+    iter_n_components = method_params.pop('iter_n_components')
+    if iter_n_components is None:
+        iter_n_components = np.arange(5, data.shape[1], 5)
+    from sklearn.decomposition import PCA, FactorAnalysis
+    if mode == 'factor_analysis':
+        est = FactorAnalysis
+    elif mode == 'pca':
+        est = PCA
+    else:
+        raise ValueError('Not a low-rank estimator: %s' % mode)
+    est = est(**method_params)
+    est.n_components = 1
+    scores = np.empty_like(iter_n_components, dtype=np.float64)
+    scores.fill(np.nan)
+
+    # make sure we don't exhaust iter_n_components if it's a generator
+    max_n = max(list(cp.deepcopy(iter_n_components)))
+    if max_n > data.shape[1]:
+        warnings.warn('You are trying to estimate %i components on matrix '
+                      'with %i features.' % (max_n, data.shape[1]))
+
+    for ii, n in enumerate(iter_n_components):
+        est.n_components = n
+        try:  # this may fail depending on rank and split
+            score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
+        except ValueError:
+            score = np.inf
+        if np.isinf(score) or score > 0:
+            logger.info('... infinite values encountered. Stopping '
+                        'estimation.')
+            break
+        logger.info('... rank: %i - loglik: %0.3f' % (n, score))
+        if score != -np.inf:
+            scores[ii] = score
+
+        if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
+           stop_early is True):
+            # early stop search when loglik has been going down 3 times
+            logger.info('early stopping parameter search.')
+            break
+
+    # happens if rank is too low right from the beginning
+    if np.isnan(scores).all():
+        raise RuntimeError('Could not estimate covariance: all scores '
+                           'were NaN. Please contact the MNE-Python '
+                           'developers.')
+
+    i_score = np.nanargmax(scores)
+    best = est.n_components = iter_n_components[i_score]
+    logger.info('... best model at rank = %i' % best)
+    runtime_info = {'ranks': np.array(iter_n_components),
+                    'scores': scores,
+                    'best': best,
+                    'cv': cv}
+    return est, runtime_info
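# --- Editorial usage sketch, not part of the upstream diff. `X` is a
# hypothetical (n_samples, n_features) array; ranks 5, 10, ... are
# scored by cross-validated log-likelihood and the search stops early
# once the score has decreased three times in a row. ---
fa, run_info = _auto_low_rank_model(X, 'factor_analysis', n_jobs=1,
                                    method_params={'iter_n_components':
                                                   None}, cv=3)
print(run_info['best'], run_info['scores'])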
+
+
+def _get_covariance_classes():
+    """Prepare special cov estimators."""
+    from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
+                                    ShrunkCovariance)
+
+    class _RegCovariance(EmpiricalCovariance):
+
+        """Aux class."""
+
+        def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
+                     store_precision=False, assume_centered=False):
+            self.info = info
+            self.grad = grad
+            self.mag = mag
+            self.eeg = eeg
+            self.store_precision = store_precision
+            self.assume_centered = assume_centered
+
+        def fit(self, X):
+            EmpiricalCovariance.fit(self, X)
+            self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
+            cov_ = Covariance(
+                data=self.covariance_, names=self.info['ch_names'],
+                bads=self.info['bads'], projs=self.info['projs'],
+                nfree=len(self.covariance_))
+            cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
+                              eeg=self.eeg, proj=False,
+                              exclude='bads')  # proj=False is important!
+            self.covariance_ = cov_.data
+            return self
+
+    class _ShrunkCovariance(ShrunkCovariance):
+
+        """Aux class."""
+
+        def __init__(self, store_precision, assume_centered, shrinkage=0.1):
+            self.store_precision = store_precision
+            self.assume_centered = assume_centered
+            self.shrinkage = shrinkage
+
+        def fit(self, X):
+            EmpiricalCovariance.fit(self, X)
+            cov = self.covariance_
+
+            if not isinstance(self.shrinkage, (list, tuple)):
+                shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
+            else:
+                shrinkage = self.shrinkage
+
+            zero_cross_cov = np.zeros_like(cov, dtype=bool)
+            for a, b in itt.combinations(shrinkage, 2):
+                picks_i, picks_j = a[2], b[2]
+                ch_ = a[0], b[0]
+                if 'eeg' in ch_:
+                    zero_cross_cov[np.ix_(picks_i, picks_j)] = True
+                    zero_cross_cov[np.ix_(picks_j, picks_i)] = True
+
+            self.zero_cross_cov_ = zero_cross_cov
+
+            # Apply shrinkage to blocks
+            for ch_type, c, picks in shrinkage:
+                sub_cov = cov[np.ix_(picks, picks)]
+                cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
+                                                              shrinkage=c)
+
+            # Apply shrinkage to cross-cov
+            for a, b in itt.combinations(shrinkage, 2):
+                shrinkage_i, shrinkage_j = a[1], b[1]
+                picks_i, picks_j = a[2], b[2]
+                c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
+                cov[np.ix_(picks_i, picks_j)] *= c_ij
+                cov[np.ix_(picks_j, picks_i)] *= c_ij
+
+            # Set to zero the necessary cross-cov
+            if np.any(zero_cross_cov):
+                cov[zero_cross_cov] = 0.0
+
+            self.covariance_ = cov
+            return self
+
+        def score(self, X_test, y=None):
+            """Compute the log-likelihood of a Gaussian data set with
+            `self.covariance_` as an estimator of its covariance matrix.
+
+            Parameters
+            ----------
+            X_test : array-like, shape = [n_samples, n_features]
+                Test data of which we compute the likelihood, where n_samples
+                is the number of samples and n_features is the number of
+                features. X_test is assumed to be drawn from the same
+                distribution as the data used in fit (including centering).
+
+            y : not used, present for API consistency purposes.
+
+            Returns
+            -------
+            res : float
+                The likelihood of the data set with `self.covariance_` as an
+                estimator of its covariance matrix.
+            """
+            from sklearn.covariance import empirical_covariance, log_likelihood
+            # compute empirical covariance of the test set
+            test_cov = empirical_covariance(X_test - self.location_,
+                                            assume_centered=True)
+            if np.any(self.zero_cross_cov_):
+                test_cov[self.zero_cross_cov_] = 0.
+            res = log_likelihood(test_cov, self.get_precision())
+            return res
+
+    return _RegCovariance, _ShrunkCovariance
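# --- Editorial illustration, not part of the upstream diff, of the
# per-block shrinkage used above: sklearn's shrunk_covariance computes
# (1 - shrinkage) * cov + shrinkage * mu * I with mu = trace(cov) / n. ---
from sklearn.covariance import shrunk_covariance
_cov = np.array([[2.0, 0.5], [0.5, 1.0]])
_mu = np.trace(_cov) / 2.
assert np.allclose(shrunk_covariance(_cov, shrinkage=0.1),
                   0.9 * _cov + 0.1 * _mu * np.eye(2))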
 
 
 ###############################################################################
 # Writing
 
 def write_cov(fname, cov):
-    """Write a noise covariance matrix
+    """Write a noise covariance matrix.
 
     Parameters
     ----------
@@ -453,6 +1115,10 @@ def write_cov(fname, cov):
         The name of the file. It should end with -cov.fif or -cov.fif.gz.
     cov : Covariance
         The noise covariance matrix
+
+    See Also
+    --------
+    read_cov
     """
     cov.save(fname)
 
@@ -460,13 +1126,8 @@ def write_cov(fname, cov):
 ###############################################################################
 # Prepare for inverse modeling
 
-def rank(A, tol=1e-8):
-    s = linalg.svd(A, compute_uv=0)
-    return np.sum(np.where(s > s[0] * tol, 1, 0))
-
-
 def _unpack_epochs(epochs):
-    """ Aux Function """
+    """Aux Function."""
     if len(epochs.event_id) > 1:
         epochs = [epochs[k] for k in epochs.event_id]
     else:
@@ -475,13 +1136,13 @@ def _unpack_epochs(epochs):
     return epochs
 
 
- at verbose
-def _get_whitener(A, pca, ch_type, verbose=None):
+def _get_ch_whitener(A, pca, ch_type, rank):
+    """"Get whitener params for a set of channels."""
     # whitening operator
-    rnk = rank(A)
     eig, eigvec = linalg.eigh(A, overwrite_a=True)
     eigvec = eigvec.T
-    eig[:-rnk] = 0.0
+    eig[:-rank] = 0.0
+
     logger.info('Setting small %s eigenvalues to zero.' % ch_type)
     if not pca:  # No PCA case.
         logger.info('Not doing PCA for %s.' % ch_type)
@@ -489,13 +1150,14 @@ def _get_whitener(A, pca, ch_type, verbose=None):
         logger.info('Doing PCA for %s.' % ch_type)
         # This line will reduce the actual number of variables in data
         # and leadfield to the true rank.
-        eigvec = eigvec[:-rnk].copy()
+        eigvec = eigvec[:-rank].copy()
     return eig, eigvec
 
 
 @verbose
-def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
-    """Prepare noise covariance matrix
+def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
+                      scalings=None, verbose=None):
+    """Prepare noise covariance matrix.
 
     Parameters
     ----------
@@ -505,15 +1167,28 @@ def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
         The measurement info (used to get channel types and bad channels).
     ch_names : list
         The channel names to be considered.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None
+        Data will be rescaled before rank estimation to improve accuracy.
+        If dict, it will override the following dict (default if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
     C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
     if noise_cov['diag'] is False:
-        C = noise_cov.data[C_ch_idx][:, C_ch_idx]
+        C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
     else:
         C = np.diag(noise_cov.data[C_ch_idx])
 
+    scalings = _handle_default('scalings_cov_rank', scalings)
+
     # Create the projection operator
     proj, ncomp, _ = make_projector(info['projs'], ch_names)
     if ncomp > 0:
@@ -533,13 +1208,35 @@ def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
     has_meg = len(C_meg_idx) > 0
     has_eeg = len(C_eeg_idx) > 0
 
-    if has_meg:
-        C_meg = C[C_meg_idx][:, C_meg_idx]
-        C_meg_eig, C_meg_eigvec = _get_whitener(C_meg, False, 'MEG')
+    # Get the specified noise covariance rank
+    if rank is not None:
+        if isinstance(rank, dict):
+            rank_meg = rank.get('meg', None)
+            rank_eeg = rank.get('eeg', None)
+        else:
+            rank_meg = int(rank)
+            rank_eeg = None
+    else:
+        rank_meg, rank_eeg = None, None
 
+    if has_meg:
+        C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
+        this_info = pick_info(info, pick_meg)
+        if rank_meg is None:
+            if len(C_meg_idx) < len(pick_meg):
+                this_info = pick_info(info, C_meg_idx)
+            rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
+        C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
+                                                   rank_meg)
     if has_eeg:
-        C_eeg = C[C_eeg_idx][:, C_eeg_idx]
-        C_eeg_eig, C_eeg_eigvec = _get_whitener(C_eeg, False, 'EEG')
+        C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
+        this_info = pick_info(info, pick_eeg)
+        if rank_eeg is None:
+            if len(C_meg_idx) < len(pick_meg):
+                this_info = pick_info(info, C_eeg_idx)
+            rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
+        C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
+                                                   rank_eeg)
         if not _has_eeg_average_ref_proj(info['projs']):
             warnings.warn('No average EEG reference present in info["projs"], '
                           'covariance may be adversely affected. Consider '
@@ -568,38 +1265,50 @@ def prepare_noise_cov(noise_cov, info, ch_names, verbose=None):
 
 def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
                proj=True, verbose=None):
-    """Regularize noise covariance matrix
+    """Regularize noise covariance matrix.
 
     This method works by adding a constant to the diagonal for each
     channel type separately. Special care is taken to keep the
     rank of the data constant.
 
+    **Note:** This function is kept for reasons of backward compatibility.
+    Please consider explicitly using the ``method`` parameter in
+    `compute_covariance` to directly combine estimation with regularization
+    in a data-driven fashion; see the
+    `faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
+    for more information.
+
     Parameters
     ----------
     cov : Covariance
         The noise covariance matrix.
     info : dict
         The measurement info (used to get channel types and bad channels).
-    mag : float
+    mag : float (default 0.1)
         Regularization factor for MEG magnetometers.
-    grad : float
+    grad : float (default 0.1)
         Regularization factor for MEG gradiometers.
-    eeg : float
+    eeg : float (default 0.1)
         Regularization factor for EEG.
-    exclude : list | 'bads'
+    exclude : list | 'bads' (default 'bads')
         List of channels to mark as bad. If 'bads', bad channels
         are extracted from both info['bads'] and cov['bads'].
-    proj : bool
+    proj : bool (default True)
         Whether to apply projections to keep the rank of the data.
-    verbose : bool, str, int, or None
+    verbose : bool | str | int | None (default None)
         If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     reg_cov : Covariance
         The regularized covariance matrix.
-    """
+
+    See Also
+    --------
+    compute_covariance
+    """  # noqa
     cov = cp.deepcopy(cov)
+    info._check_consistency()
 
     if exclude is None:
         raise ValueError('exclude must be a list of strings or "bads"')
@@ -651,7 +1360,7 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
 
         logger.info("    %s regularization : %s" % (desc, reg))
 
-        this_C = C[idx][:, idx]
+        this_C = C[np.ix_(idx, idx)]
         if proj:
             this_ch_names = [ch_names[k] for k in idx]
             P, ncomp, _ = make_projector(projs, this_ch_names)
@@ -675,8 +1384,82 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
     return cov
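# --- Editorial usage sketch, not part of the upstream diff. Assumes
# `cov` comes from compute_covariance and `info` matches the data the
# covariance was computed on. ---
reg_cov = regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1)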
 
 
-def compute_whitener(noise_cov, info, picks=None, verbose=None):
-    """Compute whitening matrix
+def _regularized_covariance(data, reg=None):
+    """Compute a regularized covariance from data using sklearn.
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_channels, n_times)
+        Data for covariance estimation.
+    reg : float | str | None (default None)
+        If not None, allow regularization for covariance estimation.
+        If float, shrinkage covariance is used (0 <= shrinkage <= 1).
+        If str, optimal shrinkage is used: Ledoit-Wolf ('ledoit_wolf')
+        or Oracle Approximating Shrinkage ('oas').
+
+    Returns
+    -------
+    cov : ndarray, shape (n_channels, n_channels)
+        The covariance matrix.
+    """
+    if reg is None:
+        # compute empirical covariance
+        cov = np.cov(data)
+    else:
+        no_sklearn_err = ('the scikit-learn package is missing and '
+                          'required for covariance regularization.')
+        # use sklearn covariance estimators
+        if isinstance(reg, float):
+            if (reg < 0) or (reg > 1):
+                raise ValueError('0 <= shrinkage <= 1 for '
+                                 'covariance regularization.')
+            try:
+                import sklearn
+                sklearn_version = LooseVersion(sklearn.__version__)
+                from sklearn.covariance import ShrunkCovariance
+            except ImportError:
+                raise Exception(no_sklearn_err)
+            if sklearn_version < '0.12':
+                skl_cov = ShrunkCovariance(shrinkage=reg,
+                                           store_precision=False)
+            else:
+                # init sklearn.covariance.ShrunkCovariance estimator
+                skl_cov = ShrunkCovariance(shrinkage=reg,
+                                           store_precision=False,
+                                           assume_centered=True)
+        elif isinstance(reg, six.string_types):
+            if reg == 'ledoit_wolf':
+                try:
+                    from sklearn.covariance import LedoitWolf
+                except ImportError:
+                    raise Exception(no_sklearn_err)
+                # init sklearn.covariance.LedoitWolf estimator
+                skl_cov = LedoitWolf(store_precision=False,
+                                     assume_centered=True)
+            elif reg == 'oas':
+                try:
+                    from sklearn.covariance import OAS
+                except ImportError:
+                    raise Exception(no_sklearn_err)
+                # init sklearn.covariance.OAS estimator
+                skl_cov = OAS(store_precision=False,
+                              assume_centered=True)
+            else:
+                raise ValueError("regularization parameter should be "
+                                 "'lwf' or 'oas'")
+        else:
+            raise ValueError("regularization parameter should be "
+                             "of type str or int (got %s)." % type(reg))
+
+        # compute regularized covariance using sklearn
+        cov = skl_cov.fit(data.T).covariance_
+
+    return cov
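# --- Editorial usage sketch, not part of the upstream diff. `X` is a
# hypothetical (n_channels, n_times) array; 'ledoit_wolf' selects the
# shrinkage analytically, a float in [0, 1] fixes it manually. ---
c1 = _regularized_covariance(X, reg='ledoit_wolf')
c2 = _regularized_covariance(X, reg=0.1)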
+
+
+def compute_whitener(noise_cov, info, picks=None, rank=None,
+                     scalings=None, verbose=None):
+    """Compute whitening matrix.
 
     Parameters
     ----------
@@ -687,6 +1470,14 @@ def compute_whitener(noise_cov, info, picks=None, verbose=None):
     picks : array-like of int | None
         The channels indices to include. If None the data
         channels in info, except bad channels, are used.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None
+        The rescaling method to be applied. See documentation of
+        ``prepare_noise_cov`` for details.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -704,7 +1495,8 @@ def compute_whitener(noise_cov, info, picks=None, verbose=None):
     ch_names = [info['chs'][k]['ch_name'] for k in picks]
 
     noise_cov = cp.deepcopy(noise_cov)
-    noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
+                                  rank=rank, scalings=scalings)
     n_chan = len(ch_names)
 
     W = np.zeros((n_chan, n_chan), dtype=np.float)
@@ -722,8 +1514,10 @@ def compute_whitener(noise_cov, info, picks=None, verbose=None):
     return W, ch_names
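# --- Editorial sketch, not part of the upstream diff: with W from
# compute_whitener, W @ x decorrelates the channels, so data whitened
# with the true noise covariance has approximately unit variance per
# channel. `noise_cov`, `info` and `raw_segment` are hypothetical. ---
W, ch_names = compute_whitener(noise_cov, info)
whitened = np.dot(W, raw_segment)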
 
 
-def whiten_evoked(evoked, noise_cov, picks, diag=False):
-    """Whiten evoked data using given noise covariance
+ at verbose
+def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
+                  scalings=None, verbose=None):
+    """Whiten evoked data using given noise covariance.
 
     Parameters
     ----------
@@ -731,45 +1525,60 @@ def whiten_evoked(evoked, noise_cov, picks, diag=False):
         The evoked data
     noise_cov : instance of Covariance
         The noise covariance
-    picks : array-like of int
-        The channel indices to whiten
-    diag : bool
-        If True, whiten using only the diagonal of the covariance
+    picks : array-like of int | None
+        The channel indices to whiten. Can be None to whiten MEG and EEG
+        data.
+    diag : bool (default False)
+        If True, whiten using only the diagonal of the covariance.
+    rank : None | int | dict (default None)
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
+    scalings : dict | None (default None)
+        To achieve reliable rank estimation on multiple sensors,
+        sensors have to be rescaled. This parameter controls the
+        rescaling. If dict, it will override the
+        following default dict (default if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     evoked_white : instance of Evoked
         The whitened evoked data.
     """
-    ch_names = [evoked.ch_names[k] for k in picks]
-    n_chan = len(ch_names)
     evoked = cp.deepcopy(evoked)
+    if picks is None:
+        picks = pick_types(evoked.info, meg=True, eeg=True)
+    W = _get_whitener_data(evoked.info, noise_cov, picks,
+                           diag=diag, rank=rank, scalings=scalings)
+    evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
+    return evoked
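# --- Editorial usage sketch, not part of the upstream diff. Assumes
# `evoked` and `noise_cov` are compatible; for pure noise the whitened
# amplitudes should scatter around +/- 1.96. ---
evoked_white = whiten_evoked(evoked, noise_cov, diag=False)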
 
+
+ at verbose
+def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
+                       scalings=None, verbose=None):
+    """Get whitening matrix for a set of data."""
+    ch_names = [info['ch_names'][k] for k in picks]
+    noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
+    info = pick_info(info, picks)
     if diag:
         noise_cov = cp.deepcopy(noise_cov)
         noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
 
-    noise_cov = prepare_noise_cov(noise_cov, evoked.info, ch_names)
-
-    W = np.zeros((n_chan, n_chan), dtype=np.float)
-    #
-    #   Omit the zeroes due to projection
-    #
-    eig = noise_cov['eig']
-    nzero = (eig > 0)
-    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
-    #
-    #   Rows of eigvec are the eigenvectors
-    #
-    W = np.dot(W, noise_cov['eigvec'])
-    W = np.dot(noise_cov['eigvec'].T, W)
-    evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
-    return evoked
+    scalings = _handle_default('scalings_cov_rank', scalings)
+    W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
+    return W
 
 
 @verbose
-def _read_cov(fid, node, cov_kind, verbose=None):
-    """Read a noise covariance matrix"""
+def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
+    """Read a noise covariance matrix."""
     #   Find all covariance matrices
     covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
     if len(covs) == 0:
@@ -794,6 +1603,18 @@ def _read_cov(fid, node, cov_kind, verbose=None):
             else:
                 nfree = int(tag.data)
 
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
+            if tag is None:
+                method = None
+            else:
+                method = tag.data
+
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
+            if tag is None:
+                score = None
+            else:
+                score = tag.data[0]
+
             tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
             if tag is None:
                 names = []
@@ -811,7 +1632,7 @@ def _read_cov(fid, node, cov_kind, verbose=None):
                 else:
                     #   Diagonal is stored
                     data = tag.data
-                    diagmat = True
+                    diag = True
                     logger.info('    %d x %d diagonal covariance (kind = '
                                 '%d) found.' % (dim, dim, cov_kind))
 
@@ -824,11 +1645,11 @@ def _read_cov(fid, node, cov_kind, verbose=None):
                     data[np.tril(np.ones((dim, dim))) > 0] = vals
                     data = data + data.T
                     data.flat[::dim + 1] /= 2.0
-                    diagmat = False
+                    diag = False
                     logger.info('    %d x %d full covariance (kind = %d) '
                                 'found.' % (dim, dim, cov_kind))
                 else:
-                    diagmat = False
+                    diag = False
                     data = tag.data
                     logger.info('    %d x %d sparse covariance (kind = %d)'
                                 ' found.' % (dim, dim, cov_kind))
@@ -850,9 +1671,18 @@ def _read_cov(fid, node, cov_kind, verbose=None):
             bads = read_bad_channels(fid, this)
 
             #   Put it together
-            cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
+            assert dim == len(data)
+            assert data.ndim == (1 if diag else 2)
+            cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
                        data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
                        eigvec=eigvec)
+            if score is not None:
+                cov['loglik'] = score
+            if method is not None:
+                cov['method'] = method
+            if limited:
+                del cov['kind'], cov['dim'], cov['diag']
+
             return cov
 
     logger.info('    Did not find the desired covariance matrix (kind = %d)'
@@ -862,7 +1692,7 @@ def _read_cov(fid, node, cov_kind, verbose=None):
 
 
 def _write_cov(fid, cov):
-    """Write a noise covariance matrix"""
+    """Write a noise covariance matrix."""
     start_block(fid, FIFF.FIFFB_MNE_COV)
 
     #   Dimensions etc.
@@ -900,5 +1730,186 @@ def _write_cov(fid, cov):
         write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
         end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
 
+    # estimator method
+    if 'method' in cov:
+        write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
+
+    # negative log-likelihood score
+    if 'loglik' in cov:
+        write_double(
+            fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
+
     #   Done!
     end_block(fid, FIFF.FIFFB_MNE_COV)
+
+
+def _apply_scaling_array(data, picks_list, scalings):
+    """Scale data type-dependently for estimation."""
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        picks_dict = dict(picks_list)
+        scalings = [(picks_dict[k], v) for k, v in scalings.items()
+                    if k in picks_dict]
+        for idx, scaling in scalings:
+            data[idx, :] *= scaling  # F - order
+    else:
+        data *= scalings[:, np.newaxis]  # F - order
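# --- Editorial example, not part of the upstream diff: bring channel
# types measured in different units (T, T/m, V) to comparable magnitude
# before estimation; _undo_scaling_array inverts this. ---
_picks_list = [('mag', np.array([0, 1])), ('eeg', np.array([2]))]
_d = np.ones((3, 4))
_apply_scaling_array(_d, _picks_list, dict(mag=1e15, grad=1e13, eeg=1e6))
assert _d[0, 0] == 1e15 and _d[2, 0] == 1e6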
+
+
+def _undo_scaling_array(data, picks_list, scalings):
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        scalings = dict((k, 1. / v) for k, v in scalings.items())
+    elif isinstance(scalings, np.ndarray):
+        scalings = 1. / scalings
+    return _apply_scaling_array(data, picks_list, scalings)
+
+
+def _apply_scaling_cov(data, picks_list, scalings):
+    """Scale resulting data after estimation."""
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    scales = None
+    if isinstance(scalings, dict):
+        n_channels = len(data)
+        covinds = list(zip(*picks_list))[1]
+        assert len(data) == sum(len(k) for k in covinds)
+        assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
+        scales = np.zeros(n_channels)
+        for ch_t, idx in picks_list:
+            scales[idx] = scalings[ch_t]
+    elif isinstance(scalings, np.ndarray):
+        if len(scalings) != len(data):
+            raise ValueError('Scaling factors and data are of incompatible '
+                             'shape')
+        scales = scalings
+    elif scalings is None:
+        pass
+    else:
+        raise RuntimeError('Invalid scalings type: %s' % type(scalings))
+    if scales is not None:
+        assert np.sum(scales == 0.) == 0
+        data *= (scales[None, :] * scales[:, None])
+
+
+def _undo_scaling_cov(data, picks_list, scalings):
+    scalings = _check_scaling_inputs(data, picks_list, scalings)
+    if isinstance(scalings, dict):
+        scalings = dict((k, 1. / v) for k, v in scalings.items())
+    elif isinstance(scalings, np.ndarray):
+        scalings = 1. / scalings
+    return _apply_scaling_cov(data, picks_list, scalings)
+
+
+def _check_scaling_inputs(data, picks_list, scalings):
+    """Aux function."""
+    rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
+
+    scalings_ = None
+    if isinstance(scalings, string_types) and scalings == 'norm':
+        scalings_ = 1. / _compute_row_norms(data)
+    elif isinstance(scalings, dict):
+        rescale_dict_.update(scalings)
+        scalings_ = rescale_dict_
+    elif isinstance(scalings, np.ndarray):
+        scalings_ = scalings
+    elif scalings is None:
+        pass
+    else:
+        raise NotImplementedError("No way! That's not a rescaling "
+                                  'option: %s' % scalings)
+    return scalings_
+
+
+def _estimate_rank_meeg_signals(data, info, scalings, tol=1e-4,
+                                return_singular=False, copy=True):
+    """Estimate rank for M/EEG data.
+
+    Parameters
+    ----------
+    data : np.ndarray of float, shape(n_channels, n_samples)
+        The M/EEG signals.
+    info : mne.io.measurement_info.Info
+        The measurement info.
+    scalings : dict | 'norm' | np.ndarray | None
+        The rescaling method to be applied. If dict, it will override the
+        following default dict:
+
+            dict(mag=1e15, grad=1e13, eeg=1e6)
+
+        If 'norm' data will be scaled by channel-wise norms. If array,
+        pre-specified norms will be used. If None, no scaling will be applied.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    picks_list = _picks_by_type(info)
+    _apply_scaling_array(data, picks_list, scalings)
+    if data.shape[1] < data.shape[0]:
+        warnings.warn("Fewer samples than channels; the rank estimate "
+                      "might be inaccurate.")
+    out = estimate_rank(data, tol=tol, norm=False,
+                        return_singular=return_singular, copy=copy)
+    rank = out[0] if isinstance(out, tuple) else out
+    ch_type = ' + '.join(list(zip(*picks_list))[0])
+    logger.info('estimated rank (%s): %d' % (ch_type, rank))
+    _undo_scaling_array(data, picks_list, scalings)
+    return out
+
+
+def _estimate_rank_meeg_cov(data, info, scalings, tol=1e-4,
+                            return_singular=False, copy=True):
+    """Estimate rank for M/EEG data.
+
+    Parameters
+    ----------
+    data : np.ndarray of float, shape (n_channels, n_channels)
+        The M/EEG covariance.
+    info : mne.io.measurement_info.Info
+        The measurement info.
+    scalings : dict | 'norm' | np.ndarray | None
+        The rescaling method to be applied. If dict, it will override the
+        following default dict:
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+        If 'norm' data will be scaled by channel-wise norms. If array,
+        pre-specified norms will be used. If None, no scaling will be applied.
+    return_singular : bool
+        If True, also return the singular values that were used
+        to determine the rank.
+    copy : bool
+        If False, values in data will be modified in-place during
+        rank estimation (saves memory).
+
+    Returns
+    -------
+    rank : int
+        Estimated rank of the data.
+    s : array
+        If return_singular is True, the singular values that were
+        thresholded to determine the rank are also returned.
+    """
+    picks_list = _picks_by_type(info)
+    scalings = _handle_default('scalings_cov_rank', scalings)
+    _apply_scaling_cov(data, picks_list, scalings)
+    if data.shape[1] < data.shape[0]:
+        warnings.warn("Fewer samples than channels; the rank estimate "
+                      "might be inaccurate.")
+    out = estimate_rank(data, tol=tol, norm=False,
+                        return_singular=return_singular, copy=copy)
+    rank = out[0] if isinstance(out, tuple) else out
+    ch_type = ' + '.join(list(zip(*picks_list))[0])
+    logger.info('estimated rank (%s): %d' % (ch_type, rank))
+    _undo_scaling_cov(data, picks_list, scalings)
+    return out
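# --- Editorial illustration, not part of the upstream diff, of why the
# rank matters: removing the mean across channels (as an average
# reference projection does) costs one degree of freedom. ---
_X = np.random.RandomState(42).randn(5, 1000)
_X -= _X.mean(axis=0, keepdims=True)
_C = np.dot(_X, _X.T) / _X.shape[1]
assert np.linalg.matrix_rank(_C) == 4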
diff --git a/mne/cuda.py b/mne/cuda.py
index b11efad..e17b0be 100644
--- a/mne/cuda.py
+++ b/mne/cuda.py
@@ -4,103 +4,104 @@
 
 import numpy as np
 from scipy.fftpack import fft, ifft
-try:
-    import pycuda.gpuarray as gpuarray
-    from pycuda.driver import mem_get_info
-    from scikits.cuda import fft as cudafft
-except (ImportError, OSError):
-    # need OSError because scikits.cuda throws it if cufft not found
-    pass
 
-from .utils import sizeof_fmt, logger
+from .utils import sizeof_fmt, logger, get_config
 
 
 # Support CUDA for FFTs; requires scikits.cuda and pycuda
-cuda_capable = False
-cuda_multiply_inplace_complex128 = None
-cuda_halve_value_complex128 = None
-cuda_real_value_complex128 = None
-requires_cuda = np.testing.dec.skipif(True, 'CUDA not initialized')
+_cuda_capable = False
+_multiply_inplace_c128 = _halve_c128 = _real_c128 = None
 
 
-def init_cuda():
+def _get_cudafft():
+    """Helper to deal with scikit-cuda namespace change"""
+    try:
+        from skcuda import fft
+    except ImportError:
+        try:
+            from scikits.cuda import fft
+        except ImportError:
+            fft = None
+    return fft
+
+
+def get_cuda_memory():
+    """Get the amount of free memory for CUDA operations
+
+    Returns
+    -------
+    memory : str
+        The amount of available memory as a human-readable string.
+    """
+    if not _cuda_capable:
+        logger.warning('CUDA not enabled, returning zero for memory')
+        mem = 0
+    else:
+        from pycuda.driver import mem_get_info
+        mem = mem_get_info()[0]
+    return sizeof_fmt(mem)
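# --- Editorial usage sketch, not part of the upstream diff. ---
init_cuda(ignore_config=True)  # attempt CUDA setup regardless of config
print(get_cuda_memory())       # human-readable free memory; warns and
                               # reports a zero size if CUDA is unavailable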
+
+
+def init_cuda(ignore_config=False):
     """Initialize CUDA functionality
 
     This function attempts to load the necessary interfaces
-    (hardware connectivity) to run CUDA-based filering. This
+    (hardware connectivity) to run CUDA-based filtering. This
     function should only need to be run once per session.
 
     If the config var (set via mne.set_config or in ENV)
     MNE_USE_CUDA == 'true', this function will be executed when
-    importing mne. If this variable is not set, this function can
-    be manually executed.
+    the first CUDA setup is performed. If this variable is not
+    set, this function can be manually executed.
     """
-    global cuda_capable
-    global cuda_multiply_inplace_complex128
-    global cuda_halve_value_complex128
-    global cuda_real_value_complex128
-    global requires_cuda
-    if cuda_capable is True:
-        logger.info('CUDA previously enabled, currently %s available memory'
-                    % sizeof_fmt(mem_get_info()[0]))
+    global _cuda_capable, _multiply_inplace_c128, _halve_c128, _real_c128
+    if _cuda_capable:
+        return
+    if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
+                              'true'):
+        logger.info('CUDA not enabled in config, skipping initialization')
         return
     # Triage possible errors for informative messaging
-    cuda_capable = False
+    _cuda_capable = False
     try:
-        import pycuda.gpuarray
-        import pycuda.driver
+        from pycuda import gpuarray, driver  # noqa
+        from pycuda.elementwise import ElementwiseKernel
     except ImportError:
         logger.warning('module pycuda not found, CUDA not enabled')
-    else:
-        try:
-            # Initialize CUDA; happens with importing autoinit
-            import pycuda.autoinit
-        except ImportError:
-            logger.warning('pycuda.autoinit could not be imported, likely '
-                           'a hardware error, CUDA not enabled')
-        else:
-            # Make our multiply inplace kernel
-            try:
-                from pycuda.elementwise import ElementwiseKernel
-                # let's construct our own CUDA multiply in-place function
-                dtype = 'pycuda::complex<double>'
-                cuda_multiply_inplace_complex128 = \
-                    ElementwiseKernel(dtype + ' *a, ' + dtype + ' *b',
-                                      'b[i] *= a[i]', 'multiply_inplace')
-                cuda_halve_value_complex128 = \
-                    ElementwiseKernel(dtype + ' *a', 'a[i] /= 2.0',
-                                      'halve_value')
-                cuda_real_value_complex128 = \
-                    ElementwiseKernel(dtype + ' *a', 'a[i] = real(a[i])',
-                                      'real_value')
-            except:
-                # This should never happen
-                raise RuntimeError('pycuda ElementwiseKernel could not be '
-                                   'constructed, please report this issue '
-                                   'to mne-python developers with your '
-                                   'system information and pycuda version')
-            else:
-                # Make sure scikits.cuda is installed
-                try:
-                    from scikits.cuda import fft as cudafft
-                except ImportError:
-                    logger.warning('module scikits.cuda not found, CUDA not '
-                                   'enabled')
-                else:
-                    # Make sure we can use 64-bit FFTs
-                    try:
-                        fft_plan = cudafft.Plan(16, np.float64, np.complex128)
-                        del fft_plan
-                    except:
-                        logger.warning('Device does not support 64-bit FFTs, '
-                                       'CUDA not enabled')
-                    else:
-                        cuda_capable = True
-                        # Figure out limit for CUDA FFT calculations
-                        logger.info('Enabling CUDA with %s available memory'
-                                    % sizeof_fmt(mem_get_info()[0]))
-    requires_cuda = np.testing.dec.skipif(not cuda_capable,
-                                          'CUDA not initialized')
+        return
+    try:
+        # Initialize CUDA; happens with importing autoinit
+        import pycuda.autoinit  # noqa
+    except ImportError:
+        logger.warning('pycuda.autoinit could not be imported, likely '
+                       'a hardware error, CUDA not enabled')
+        return
+    # Make sure scikit-cuda is installed
+    cudafft = _get_cudafft()
+    if cudafft is None:
+        logger.warning('module scikit-cuda not found, CUDA not '
+                       'enabled')
+        return
+
+    # let's construct our own CUDA multiply in-place function
+    _multiply_inplace_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a, pycuda::complex<double> *b',
+        'b[i] *= a[i]', 'multiply_inplace')
+    _halve_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a', 'a[i] /= 2.0', 'halve_value')
+    _real_c128 = ElementwiseKernel(
+        'pycuda::complex<double> *a', 'a[i] = real(a[i])', 'real_value')
+
+    # Make sure we can use 64-bit FFTs
+    try:
+        cudafft.Plan(16, np.float64, np.complex128)  # will get auto-GC'ed
+    except Exception:
+        logger.warning('Device does not support 64-bit FFTs, '
+                       'CUDA not enabled')
+        return
+    _cuda_capable = True
+    # Figure out limit for CUDA FFT calculations
+    logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
 
 
 ###############################################################################
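
As the init_cuda docstring notes, CUDA support is gated on the
MNE_USE_CUDA config variable. A minimal usage sketch, assuming a working
pycuda and scikit-cuda installation:

    import mne

    mne.set_config('MNE_USE_CUDA', 'true')  # stored in the mne-python config
    mne.cuda.init_cuda()   # no-op if already enabled or if config says 'false'
    print(mne.cuda.get_cuda_memory())  # human-readable free-memory string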
@@ -148,44 +149,34 @@ def setup_cuda_fft_multiply_repeated(n_jobs, h_fft):
     This function is designed to be used with fft_multiply_repeated().
     """
     cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
-                     x_fft=None, x=None, fft_len=None)
+                     x_fft=None, x=None)
     n_fft = len(h_fft)
+    cuda_fft_len = int((n_fft - (n_fft % 2)) // 2 + 1)
     if n_jobs == 'cuda':
         n_jobs = 1
-        if cuda_capable:
+        init_cuda()
+        if _cuda_capable:
+            from pycuda import gpuarray
+            cudafft = _get_cudafft()
             # set up all arrays necessary for CUDA
-            cuda_fft_len = int((n_fft - (n_fft % 2)) / 2 + 1)
-            use_cuda = False
             # try setting up for float64
             try:
-                fft_plan = cudafft.Plan(n_fft, np.float64, np.complex128)
-                ifft_plan = cudafft.Plan(n_fft, np.complex128, np.float64)
-                x_fft = gpuarray.empty(cuda_fft_len, np.complex128)
-                x = gpuarray.empty(int(n_fft), np.float64)
-                cuda_h_fft = h_fft[:cuda_fft_len].astype('complex128')
                 # do the IFFT normalization now so we don't have to later
-                cuda_h_fft /= len(h_fft)
-                h_fft = gpuarray.to_gpu(cuda_h_fft)
-                dtype = np.float64
-                multiply_inplace = cuda_multiply_inplace_complex128
-            except:
+                h_fft = gpuarray.to_gpu(h_fft[:cuda_fft_len]
+                                        .astype('complex_') / len(h_fft))
+                cuda_dict.update(
+                    use_cuda=True,
+                    fft_plan=cudafft.Plan(n_fft, np.float64, np.complex128),
+                    ifft_plan=cudafft.Plan(n_fft, np.complex128, np.float64),
+                    x_fft=gpuarray.empty(cuda_fft_len, np.complex128),
+                    x=gpuarray.empty(int(n_fft), np.float64))
+                logger.info('Using CUDA for FFT FIR filtering')
+            except Exception:
                 logger.info('CUDA not used, could not instantiate memory '
                             '(arrays may be too large), falling back to '
                             'n_jobs=1')
-            else:
-                use_cuda = True
-
-            if use_cuda is True:
-                logger.info('Using CUDA for FFT FIR filtering')
-                cuda_dict['use_cuda'] = True
-                cuda_dict['fft_plan'] = fft_plan
-                cuda_dict['ifft_plan'] = ifft_plan
-                cuda_dict['x_fft'] = x_fft
-                cuda_dict['x'] = x
-                cuda_dict['dtype'] = dtype
-                cuda_dict['multiply_inplace'] = multiply_inplace
         else:
-            logger.info('CUDA not used, CUDA has not been initialized, '
+            logger.info('CUDA not used, CUDA could not be initialized, '
                         'falling back to n_jobs=1')
     return n_jobs, cuda_dict, h_fft
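
This setup function is not usually called directly; passing
n_jobs='cuda' to the FFT-based filtering routines triggers it. A hedged
sketch using the functional filtering API of this release (parameter
values are illustrative):

    import numpy as np
    from mne.filter import low_pass_filter

    sfreq = 1000.
    x = np.random.randn(4, 10 * int(sfreq))   # 4 channels, 10 s of data
    # transparently falls back to n_jobs=1 if CUDA cannot be initialized
    y = low_pass_filter(x, Fs=sfreq, Fp=40., n_jobs='cuda')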
 
@@ -211,10 +202,11 @@ def fft_multiply_repeated(h_fft, x, cuda_dict=dict(use_cuda=False)):
         # do the fourier-domain operations
         x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
     else:
+        cudafft = _get_cudafft()
         # do the fourier-domain operations, results in second param
-        cuda_dict['x'].set(x.astype(cuda_dict['dtype']))
+        cuda_dict['x'].set(x.astype(np.float64))
         cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
-        cuda_dict['multiply_inplace'](h_fft, cuda_dict['x_fft'])
+        _multiply_inplace_c128(h_fft, cuda_dict['x_fft'])
         # If we wanted to do it locally instead of using our own kernel:
         # cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
         cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
@@ -272,48 +264,35 @@ def setup_cuda_fft_resample(n_jobs, W, new_len):
     """
     cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
                      x_fft=None, x=None, y_fft=None, y=None)
+    n_fft_x, n_fft_y = len(W), new_len
+    cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
+    cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
     if n_jobs == 'cuda':
         n_jobs = 1
-        if cuda_capable:
-            use_cuda = False
+        init_cuda()
+        if _cuda_capable:
             # try setting up for float64
+            from pycuda import gpuarray
+            cudafft = _get_cudafft()
             try:
-                n_fft_x = len(W)
-                cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
-                n_fft_y = new_len
-                cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
-                fft_plan = cudafft.Plan(n_fft_x, np.float64, np.complex128)
-                ifft_plan = cudafft.Plan(n_fft_y, np.complex128, np.float64)
-                x_fft = gpuarray.zeros(max(cuda_fft_len_x,
-                                           cuda_fft_len_y), np.complex128)
-                x = gpuarray.empty(max(int(n_fft_x),
-                                       int(n_fft_y)), np.float64)
-                cuda_W = W[:cuda_fft_len_x].astype('complex128')
                 # do the IFFT normalization now so we don't have to later
-                cuda_W /= n_fft_y
-                W = gpuarray.to_gpu(cuda_W)
-                dtype = np.float64
-                multiply_inplace = cuda_multiply_inplace_complex128
-            except:
+                W = gpuarray.to_gpu(W[:cuda_fft_len_x]
+                                    .astype('complex_') / n_fft_y)
+                cuda_dict.update(
+                    use_cuda=True,
+                    fft_plan=cudafft.Plan(n_fft_x, np.float64, np.complex128),
+                    ifft_plan=cudafft.Plan(n_fft_y, np.complex128, np.float64),
+                    x_fft=gpuarray.zeros(max(cuda_fft_len_x,
+                                             cuda_fft_len_y), np.complex128),
+                    x=gpuarray.empty(max(int(n_fft_x),
+                                     int(n_fft_y)), np.float64))
+                logger.info('Using CUDA for FFT resampling')
+            except Exception:
                 logger.info('CUDA not used, could not instantiate memory '
                             '(arrays may be too large), falling back to '
                             'n_jobs=1')
-            else:
-                use_cuda = True
-
-            if use_cuda is True:
-                logger.info('Using CUDA for FFT FIR filtering')
-                cuda_dict['use_cuda'] = True
-                cuda_dict['fft_plan'] = fft_plan
-                cuda_dict['ifft_plan'] = ifft_plan
-                cuda_dict['x_fft'] = x_fft
-                cuda_dict['x'] = x
-                cuda_dict['dtype'] = dtype
-                cuda_dict['multiply_inplace'] = multiply_inplace
-                cuda_dict['halve_value'] = cuda_halve_value_complex128
-                cuda_dict['real_value'] = cuda_real_value_complex128
         else:
-            logger.info('CUDA not used, CUDA has not been initialized, '
+            logger.info('CUDA not used, CUDA could not be initialized, '
                         'falling back to n_jobs=1')
     return n_jobs, cuda_dict, W
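
Note that the cuda_fft_len_* expressions are just the length of a
real-to-complex FFT output, n // 2 + 1. Resampling routes through this
setup in the same way as filtering; a hedged sketch using the public
resample function (argument values are illustrative):

    import numpy as np
    from mne.filter import resample

    x = np.random.randn(2, 1000)
    # upsample by 2x; falls back to CPU if CUDA is unavailable
    y = resample(x, up=2., down=1., npad=100, n_jobs='cuda')  # (2, 2000)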
 
@@ -325,7 +304,7 @@ def fft_resample(x, W, new_len, npad, to_remove,
     Parameters
     ----------
     x : 1-d array
-        The array to resample.
+        The array to resample. Will be converted to float64 if necessary.
     W : 1-d array or gpuarray
         The filtering function to apply.
     new_len : int
@@ -343,45 +322,40 @@ def fft_resample(x, W, new_len, npad, to_remove,
         Filtered version of x.
     """
     # add some padding at beginning and end to make this work a little cleaner
+    if x.dtype != np.float64:
+        x = x.astype(np.float64)
     x = _smart_pad(x, npad)
     old_len = len(x)
+    shorter = new_len < old_len
     if not cuda_dict['use_cuda']:
         N = int(min(new_len, old_len))
         sl_1 = slice((N + 1) // 2)
         y_fft = np.zeros(new_len, np.complex128)
-        x_fft = fft(x).ravel()
-        x_fft *= W
+        x_fft = fft(x).ravel() * W
         y_fft[sl_1] = x_fft[sl_1]
         sl_2 = slice(-(N - 1) // 2, None)
         y_fft[sl_2] = x_fft[sl_2]
         y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
     else:
-        if old_len < new_len:
-            x = np.concatenate((x, np.zeros(new_len - old_len, x.dtype)))
-        cuda_dict['x'].set(x)
+        cudafft = _get_cudafft()
+        cuda_dict['x'].set(np.concatenate((x, np.zeros(max(new_len - old_len,
+                                                           0), x.dtype))))
         # do the fourier-domain operations, results put in second param
         cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
-        cuda_dict['multiply_inplace'](W, cuda_dict['x_fft'])
+        _multiply_inplace_c128(W, cuda_dict['x_fft'])
         # This is not straightforward, but because x_fft and y_fft share
         # the same data (and only one half of the full DFT is stored), we
         # don't have to transfer the slice like we do in scipy. All we
         # need to worry about is the Nyquist component, either halving it
         # or taking just the real component...
-        if new_len > old_len:
-            if old_len % 2 == 0:
-                nyq = int((old_len - (old_len % 2)) // 2)
-                cuda_dict['halve_value'](cuda_dict['x_fft'],
-                                        slice=slice(nyq, nyq + 1))
-        else:
-            if new_len % 2 == 0:
-                nyq = int((new_len - (new_len % 2)) // 2)
-                cuda_dict['real_value'](cuda_dict['x_fft'],
-                                        slice=slice(nyq, nyq + 1))
+        use_len = new_len if shorter else old_len
+        func = _real_c128 if shorter else _halve_c128
+        if use_len % 2 == 0:
+            nyq = int((use_len - (use_len % 2)) // 2)
+            func(cuda_dict['x_fft'], slice=slice(nyq, nyq + 1))
         cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
                      cuda_dict['ifft_plan'], scale=False)
-        y = cuda_dict['x'].get()
-        if new_len < old_len:
-            y = y[:new_len].copy()
+        y = cuda_dict['x'].get()[:new_len if shorter else None]
 
     # now let's trim it back to the correct size (if there was padding)
     if to_remove > 0:
@@ -400,7 +374,11 @@ def fft_resample(x, W, new_len, npad, to_remove,
 def _smart_pad(x, n_pad):
     """Pad vector x
     """
+    if n_pad == 0:
+        return x
+    elif n_pad < 0:
+        raise RuntimeError('n_pad must be non-negative')
     # need to pad with zeros if len(x) <= npad
     z_pad = np.zeros(max(n_pad - len(x) + 1, 0), dtype=x.dtype)
-    return np.r_[z_pad, 2 * x[0] - x[n_pad:0:-1], x,
-                 2 * x[-1] - x[-2:-n_pad - 2:-1], z_pad]
+    return np.concatenate([z_pad, 2 * x[0] - x[n_pad:0:-1], x,
+                           2 * x[-1] - x[-2:-n_pad - 2:-1], z_pad])
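
The padding in _smart_pad is an odd (point-symmetric) reflection about
each endpoint, which keeps the padded signal continuous at the edges. A
worked example of what the expression produces:

    import numpy as np

    x = np.array([1., 2., 3.])
    n_pad = 2
    # left:  2*x[0]  - x[n_pad:0:-1]     = 2 - [3, 2] = [-1, 0]
    # right: 2*x[-1] - x[-2:-n_pad-2:-1] = 6 - [2, 1] = [4, 5]
    # z_pad is empty here since n_pad < len(x), so the result is
    # [-1, 0, 1, 2, 3, 4, 5]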
diff --git a/mne/data/FreeSurferColorLUT.txt b/mne/data/FreeSurferColorLUT.txt
new file mode 100644
index 0000000..2b85ef3
--- /dev/null
+++ b/mne/data/FreeSurferColorLUT.txt
@@ -0,0 +1,1397 @@
+#$Id: FreeSurferColorLUT.txt,v 1.70.2.7 2012/08/27 17:20:08 nicks Exp $
+
+#No. Label Name:                            R   G   B   A
+
+0   Unknown                                 0   0   0   0
+1   Left-Cerebral-Exterior                  70  130 180 0
+2   Left-Cerebral-White-Matter              245 245 245 0
+3   Left-Cerebral-Cortex                    205 62  78  0
+4   Left-Lateral-Ventricle                  120 18  134 0
+5   Left-Inf-Lat-Vent                       196 58  250 0
+6   Left-Cerebellum-Exterior                0   148 0   0
+7   Left-Cerebellum-White-Matter            220 248 164 0
+8   Left-Cerebellum-Cortex                  230 148 34  0
+9   Left-Thalamus                           0   118 14  0
+10  Left-Thalamus-Proper                    0   118 14  0
+11  Left-Caudate                            122 186 220 0
+12  Left-Putamen                            236 13  176 0
+13  Left-Pallidum                           12  48  255 0
+14  3rd-Ventricle                           204 182 142 0
+15  4th-Ventricle                           42  204 164 0
+16  Brain-Stem                              119 159 176 0
+17  Left-Hippocampus                        220 216 20  0
+18  Left-Amygdala                           103 255 255 0
+19  Left-Insula                             80  196 98  0
+20  Left-Operculum                          60  58  210 0
+21  Line-1                                  60  58  210 0
+22  Line-2                                  60  58  210 0
+23  Line-3                                  60  58  210 0
+24  CSF                                     60  60  60  0
+25  Left-Lesion                             255 165 0   0
+26  Left-Accumbens-area                     255 165 0   0
+27  Left-Substancia-Nigra                   0   255 127 0
+28  Left-VentralDC                          165 42  42  0
+29  Left-undetermined                       135 206 235 0
+30  Left-vessel                             160 32  240 0
+31  Left-choroid-plexus                     0   200 200 0
+32  Left-F3orb                              100 50  100 0
+33  Left-lOg                                135 50  74  0
+34  Left-aOg                                122 135 50  0
+35  Left-mOg                                51  50  135 0
+36  Left-pOg                                74  155 60  0
+37  Left-Stellate                           120 62  43  0
+38  Left-Porg                               74  155 60  0
+39  Left-Aorg                               122 135 50  0
+40  Right-Cerebral-Exterior                 70  130 180 0
+41  Right-Cerebral-White-Matter             0   225 0   0
+42  Right-Cerebral-Cortex                   205 62  78  0
+43  Right-Lateral-Ventricle                 120 18  134 0
+44  Right-Inf-Lat-Vent                      196 58  250 0
+45  Right-Cerebellum-Exterior               0   148 0   0
+46  Right-Cerebellum-White-Matter           220 248 164 0
+47  Right-Cerebellum-Cortex                 230 148 34  0
+48  Right-Thalamus                          0   118 14  0
+49  Right-Thalamus-Proper                   0   118 14  0
+50  Right-Caudate                           122 186 220 0
+51  Right-Putamen                           236 13  176 0
+52  Right-Pallidum                          13  48  255 0
+53  Right-Hippocampus                       220 216 20  0
+54  Right-Amygdala                          103 255 255 0
+55  Right-Insula                            80  196 98  0
+56  Right-Operculum                         60  58  210 0
+57  Right-Lesion                            255 165 0   0
+58  Right-Accumbens-area                    255 165 0   0
+59  Right-Substancia-Nigra                  0   255 127 0
+60  Right-VentralDC                         165 42  42  0
+61  Right-undetermined                      135 206 235 0
+62  Right-vessel                            160 32  240 0
+63  Right-choroid-plexus                    0   200 221 0
+64  Right-F3orb                             100 50  100 0
+65  Right-lOg                               135 50  74  0
+66  Right-aOg                               122 135 50  0
+67  Right-mOg                               51  50  135 0
+68  Right-pOg                               74  155 60  0
+69  Right-Stellate                          120 62  43  0
+70  Right-Porg                              74  155 60  0
+71  Right-Aorg                              122 135 50  0
+72  5th-Ventricle                           120 190 150 0
+73  Left-Interior                           122 135 50  0
+74  Right-Interior                          122 135 50  0
+# 75/76 removed. duplicates of 4/43
+77  WM-hypointensities                      200 70  255 0
+78  Left-WM-hypointensities                 255 148 10  0
+79  Right-WM-hypointensities                255 148 10  0
+80  non-WM-hypointensities                  164 108 226 0
+81  Left-non-WM-hypointensities             164 108 226 0
+82  Right-non-WM-hypointensities            164 108 226 0
+83  Left-F1                                 255 218 185 0
+84  Right-F1                                255 218 185 0
+85  Optic-Chiasm                            234 169 30  0
+192 Corpus_Callosum                         250 255 50  0
+
+86  Left_future_WMSA                        200 120  255 0
+87  Right_future_WMSA                       200 121  255 0
+88  future_WMSA                             200 122  255 0
+
+
+96  Left-Amygdala-Anterior                  205 10  125 0
+97  Right-Amygdala-Anterior                 205 10  125 0
+98  Dura                                    160 32  240 0
+
+100 Left-wm-intensity-abnormality           124 140 178 0
+101 Left-caudate-intensity-abnormality      125 140 178 0
+102 Left-putamen-intensity-abnormality      126 140 178 0
+103 Left-accumbens-intensity-abnormality    127 140 178 0
+104 Left-pallidum-intensity-abnormality     124 141 178 0
+105 Left-amygdala-intensity-abnormality     124 142 178 0
+106 Left-hippocampus-intensity-abnormality  124 143 178 0
+107 Left-thalamus-intensity-abnormality     124 144 178 0
+108 Left-VDC-intensity-abnormality          124 140 179 0
+109 Right-wm-intensity-abnormality          124 140 178 0
+110 Right-caudate-intensity-abnormality     125 140 178 0
+111 Right-putamen-intensity-abnormality     126 140 178 0
+112 Right-accumbens-intensity-abnormality   127 140 178 0
+113 Right-pallidum-intensity-abnormality    124 141 178 0
+114 Right-amygdala-intensity-abnormality    124 142 178 0
+115 Right-hippocampus-intensity-abnormality 124 143 178 0
+116 Right-thalamus-intensity-abnormality    124 144 178 0
+117 Right-VDC-intensity-abnormality         124 140 179 0
+
+118 Epidermis                               255 20  147 0
+119 Conn-Tissue                             205 179 139 0
+120 SC-Fat-Muscle                           238 238 209 0
+121 Cranium                                 200 200 200 0
+122 CSF-SA                                  74  255 74  0
+123 Muscle                                  238 0   0   0
+124 Ear                                     0   0   139 0
+125 Adipose                                 173 255 47  0
+126 Spinal-Cord                             133 203 229 0
+127 Soft-Tissue                             26  237 57  0
+128 Nerve                                   34  139 34  0
+129 Bone                                    30  144 255 0
+130 Air                                     147 19  173 0
+131 Orbital-Fat                             238 59  59  0
+132 Tongue                                  221 39  200 0
+133 Nasal-Structures                        238 174 238 0
+134 Globe                                   255 0   0   0
+135 Teeth                                   72  61  139 0
+136 Left-Caudate-Putamen                    21  39  132 0
+137 Right-Caudate-Putamen                   21  39  132 0
+138 Left-Claustrum                          65  135 20  0
+139 Right-Claustrum                         65  135 20  0
+140 Cornea                                  134 4   160 0
+142 Diploe                                  221 226 68  0
+143 Vitreous-Humor                          255 255 254 0
+144 Lens                                    52  209 226 0
+145 Aqueous-Humor                           239 160 223 0
+146 Outer-Table                             70  130 180 0
+147 Inner-Table                             70  130 181 0
+148 Periosteum                              139 121 94  0
+149 Endosteum                               224 224 224 0
+150 R-C-S                                   255 0   0   0
+151 Iris                                    205 205 0   0
+152 SC-Adipose-Muscle                       238 238 209 0
+153 SC-Tissue                               139 121 94  0
+154 Orbital-Adipose                         238 59  59  0
+
+155 Left-IntCapsule-Ant                     238 59  59  0
+156 Right-IntCapsule-Ant                    238 59  59  0
+157 Left-IntCapsule-Pos                     62  10  205 0
+158 Right-IntCapsule-Pos                    62  10  205 0
+
+# These labels are for babies/children
+159 Left-Cerebral-WM-unmyelinated           0   118 14  0
+160 Right-Cerebral-WM-unmyelinated          0   118 14  0
+161 Left-Cerebral-WM-myelinated             220 216 21  0
+162 Right-Cerebral-WM-myelinated            220 216 21  0
+163 Left-Subcortical-Gray-Matter            122 186 220 0
+164 Right-Subcortical-Gray-Matter           122 186 220 0
+165 Skull                                   255 165 0   0
+166 Posterior-fossa                         14  48  255 0
+167 Scalp                                   166 42  42  0
+168 Hematoma                                121 18  134 0
+169 Left-Basal-Ganglia                      236 13  127 0
+176 Right-Basal-Ganglia                     236 13  126 0
+
+# Label names and colors for Brainstem constituents
+# No.  Label Name:                          R   G   B   A
+170 brainstem                               119 159 176 0
+171 DCG                                     119 0   176 0
+172 Vermis                                  119 100 176 0
+173 Midbrain                                119 200 176 0
+174 Pons                                    119 159 100 0
+175 Medulla                                 119 159 200 0
+
+#176 Right-Basal-Ganglia   found in babies/children section above
+
+180 Left-Cortical-Dysplasia                 73  61  139 0
+181 Right-Cortical-Dysplasia                73  62  139 0
+
+#192 Corpus_Callosum  listed after #85 above
+193 Left-hippocampal_fissure                0   196 255 0
+194 Left-CADG-head                          255 164 164 0
+195 Left-subiculum                          196 196 0   0
+196 Left-fimbria                            0   100 255 0
+197 Right-hippocampal_fissure               128 196 164 0
+198 Right-CADG-head                         0   126 75  0
+199 Right-subiculum                         128 96  64  0
+200 Right-fimbria                           0   50  128 0
+201 alveus                                  255 204 153 0
+202 perforant_pathway                       255 128 128 0
+203 parasubiculum                           255 255 0   0
+204 presubiculum                            64  0   64  0
+205 subiculum                               0   0   255 0
+206 CA1                                     255 0   0   0
+207 CA2                                     128 128 255 0
+208 CA3                                     0   128 0   0
+209 CA4                                     196 160 128 0
+210 GC-ML-DG                                32  200 255 0
+211 HATA                                    128 255 128 0
+212 fimbria                                 204 153 204 0
+213 lateral_ventricle                       121 17  136 0
+214 molecular_layer_HP                      128 0   0   0
+215 hippocampal_fissure                     128 32  255 0
+216 entorhinal_cortex                       255 204 102 0
+217 molecular_layer_subiculum               128 128 128 0
+218 Amygdala                                104 255 255 0
+219 Cerebral_White_Matter                   0   226 0   0
+220 Cerebral_Cortex                         205 63  78  0
+221 Inf_Lat_Vent                            197 58  250 0
+222 Perirhinal                              33  150 250 0
+223 Cerebral_White_Matter_Edge              226 0   0   0
+224 Background                              100 100 100 0
+225 Ectorhinal                              197 150 250 0
+226 HP_tail                                 170 170 255 0
+
+250 Fornix                                  255 0   0   0
+251 CC_Posterior                            0   0   64  0
+252 CC_Mid_Posterior                        0   0   112 0
+253 CC_Central                              0   0   160 0
+254 CC_Mid_Anterior                         0   0   208 0
+255 CC_Anterior                             0   0   255 0
+
+# This is for keeping track of voxel changes
+256 Voxel-Unchanged                         0   0   0   0
+
+# lymph node and vascular labels
+331 Aorta                                   255 0   0   0
+332 Left-Common-IliacA                      255 80  0   0
+333 Right-Common-IliacA                     255 160 0   0
+334 Left-External-IliacA                    255 255 0   0
+335 Right-External-IliacA                   0   255 0   0
+336 Left-Internal-IliacA                    255 0   160 0
+337 Right-Internal-IliacA                   255 0   255 0
+338 Left-Lateral-SacralA                    255 50  80  0
+339 Right-Lateral-SacralA                   80  255 50  0
+340 Left-ObturatorA                         160 255 50  0
+341 Right-ObturatorA                        160 200 255 0
+342 Left-Internal-PudendalA                 0   255 160 0
+343 Right-Internal-PudendalA                0   0   255 0
+344 Left-UmbilicalA                         80  50  255 0
+345 Right-UmbilicalA                        160 0   255 0
+346 Left-Inf-RectalA                        255 210 0   0
+347 Right-Inf-RectalA                       0   160 255 0
+348 Left-Common-IliacV                      255 200 80  0
+349 Right-Common-IliacV                     255 200 160 0
+350 Left-External-IliacV                    255 80  200 0
+351 Right-External-IliacV                   255 160 200 0
+352 Left-Internal-IliacV                    30  255 80  0
+353 Right-Internal-IliacV                   80  200 255 0
+354 Left-ObturatorV                         80  255 200 0
+355 Right-ObturatorV                        195 255 200 0
+356 Left-Internal-PudendalV                 120 200 20  0
+357 Right-Internal-PudendalV                170 10  200 0
+358 Pos-Lymph                               20  130 180 0
+359 Neg-Lymph                               20  180 130 0
+
+400 V1                                      206 62  78  0
+401 V2                                      121 18  134 0
+402 BA44                                    199 58  250 0
+403 BA45                                    1   148 0   0
+404 BA4a                                    221 248 164 0
+405 BA4p                                    231 148 34  0
+406 BA6                                     1   118 14  0
+407 BA2                                     120 118 14  0
+408 BA1_old                                 123 186 221 0
+409 BAun2                                   238 13  177 0
+410 BA1                                     123 186 220 0
+411 BA2b                                    138 13  206 0
+412 BA3a                                    238 130 176 0
+413 BA3b                                    218 230 76  0
+414 MT                                      38  213 176 0
+415 AIPS_AIP_l                              1   225 176 0
+416 AIPS_AIP_r                              1   225 176 0
+417 AIPS_VIP_l                              200 2   100 0
+418 AIPS_VIP_r                              200 2   100 0
+419 IPL_PFcm_l                              5   200 90  0
+420 IPL_PFcm_r                              5   200 90  0
+421 IPL_PF_l                                100 5   200 0
+422 IPL_PFm_l                               25  255 100 0
+423 IPL_PFm_r                               25  255 100 0
+424 IPL_PFop_l                              230 7   100 0
+425 IPL_PFop_r                              230 7   100 0
+426 IPL_PF_r                                100 5   200 0
+427 IPL_PFt_l                               150 10  200 0
+428 IPL_PFt_r                               150 10  200 0
+429 IPL_PGa_l                               175 10  176 0
+430 IPL_PGa_r                               175 10  176 0
+431 IPL_PGp_l                               10  100 255 0
+432 IPL_PGp_r                               10  100 255 0
+433 Visual_V3d_l                            150 45  70  0
+434 Visual_V3d_r                            150 45  70  0
+435 Visual_V4_l                             45  200 15  0
+436 Visual_V4_r                             45  200 15  0
+437 Visual_V5_b                             227 45  100 0
+438 Visual_VP_l                             227 45  100 0
+439 Visual_VP_r                             227 45  100 0
+
+# wm lesions
+498 wmsa                                    143 188 143 0
+499 other_wmsa                              255 248 220 0
+
+# HiRes Hippocampus labeling
+500 right_CA2_3                             17  85  136 0
+501 right_alveus                            119 187 102 0
+502 right_CA1                               204 68  34  0
+503 right_fimbria                           204 0   255 0
+504 right_presubiculum                      221 187 17  0
+505 right_hippocampal_fissure               153 221 238 0
+506 right_CA4_DG                            51  17  17  0
+507 right_subiculum                         0   119 85  0
+508 right_fornix                            20  100 200 0
+
+550 left_CA2_3                              17  85  137 0
+551 left_alveus                             119 187 103 0
+552 left_CA1                                204 68  35  0
+553 left_fimbria                            204 0   254 0
+554 left_presubiculum                       221 187 16  0
+555 left_hippocampal_fissure                153 221 239 0
+556 left_CA4_DG                             51  17  18  0
+557 left_subiculum                          0   119 86  0
+558 left_fornix                             20  100 201 0
+
+600 Tumor                                   254 254 254 0
+
+
+# Cerebellar parcellation labels from SUIT (matches labels in cma.h)
+#No. Label Name:                            R   G   B   A
+601  Cbm_Left_I_IV                          70  130 180 0
+602  Cbm_Right_I_IV                         245 245 245 0
+603  Cbm_Left_V                             205 62  78  0
+604  Cbm_Right_V                            120 18  134 0
+605  Cbm_Left_VI                            196 58  250 0
+606  Cbm_Vermis_VI                          0   148 0   0
+607  Cbm_Right_VI                           220 248 164 0
+608  Cbm_Left_CrusI                         230 148 34  0
+609  Cbm_Vermis_CrusI                       0   118 14  0
+610  Cbm_Right_CrusI                        0   118 14  0
+611  Cbm_Left_CrusII                        122 186 220 0
+612  Cbm_Vermis_CrusII                      236 13  176 0
+613  Cbm_Right_CrusII                       12  48  255 0
+614  Cbm_Left_VIIb                          204 182 142 0
+615  Cbm_Vermis_VIIb                        42  204 164 0
+616  Cbm_Right_VIIb                         119 159 176 0
+617  Cbm_Left_VIIIa                         220 216 20  0
+618  Cbm_Vermis_VIIIa                       103 255 255 0
+619  Cbm_Right_VIIIa                        80  196 98  0
+620  Cbm_Left_VIIIb                         60  58  210 0
+621  Cbm_Vermis_VIIIb                       60  58  210 0
+622  Cbm_Right_VIIIb                        60  58  210 0
+623  Cbm_Left_IX                            60  58  210 0
+624  Cbm_Vermis_IX                          60  60  60  0
+625  Cbm_Right_IX                           255 165 0   0
+626  Cbm_Left_X                             255 165 0   0
+627  Cbm_Vermis_X                           0   255 127 0
+628  Cbm_Right_X                            165 42  42  0
+
+# Cerebellar lobule parcellations
+640  Cbm_Right_I_V_med                      204  0  0   0
+641  Cbm_Right_I_V_mid                      255  0  0   0
+642  Cbm_Right_VI_med                       0    0  255 0
+643  Cbm_Right_VI_mid                       30  144 255 0
+644  Cbm_Right_VI_lat                       100 212 237 0
+645  Cbm_Right_CrusI_med                    218 165 32  0
+646  Cbm_Right_CrusI_mid                    255 215 0   0
+647  Cbm_Right_CrusI_lat                    255 255 166 0
+648  Cbm_Right_CrusII_med                   153 0   204 0
+649  Cbm_Right_CrusII_mid                   153 141 209 0
+650  Cbm_Right_CrusII_lat                   204 204 255 0
+651  Cbm_Right_7med                         31  212 194 0
+652  Cbm_Right_7mid                         3   255 237 0
+653  Cbm_Right_7lat                         204 255 255 0
+654  Cbm_Right_8med                         86  74  147 0
+655  Cbm_Right_8mid                         114 114 190 0
+656  Cbm_Right_8lat                         184 178 255 0
+657  Cbm_Right_PUNs                         126 138 37  0
+658  Cbm_Right_TONs                         189 197 117 0
+659  Cbm_Right_FLOs                         240 230 140 0
+660  Cbm_Left_I_V_med                       204  0  0   0
+661  Cbm_Left_I_V_mid                       255  0  0   0
+662  Cbm_Left_VI_med                        0    0  255 0
+663  Cbm_Left_VI_mid                        30  144 255 0
+664  Cbm_Left_VI_lat                        100 212 237 0
+665  Cbm_Left_CrusI_med                     218 165 32  0
+666  Cbm_Left_CrusI_mid                     255 215 0   0
+667  Cbm_Left_CrusI_lat                     255 255 166 0
+668  Cbm_Left_CrusII_med                    153 0   204 0
+669  Cbm_Left_CrusII_mid                    153 141 209 0
+670  Cbm_Left_CrusII_lat                    204 204 255 0
+671  Cbm_Left_7med                          31  212 194 0
+672  Cbm_Left_7mid                          3   255 237 0
+673  Cbm_Left_7lat                          204 255 255 0
+674  Cbm_Left_8med                          86  74  147 0
+675  Cbm_Left_8mid                          114 114 190 0
+676  Cbm_Left_8lat                          184 178 255 0
+677  Cbm_Left_PUNs                          126 138 37  0
+678  Cbm_Left_TONs                          189 197 117 0
+679  Cbm_Left_FLOs                          240 230 140 0
+
+701 CSF-FSL-FAST                            120 18  134 0
+702 GrayMatter-FSL-FAST                     205 62  78  0
+703 WhiteMatter-FSL-FAST                    0   225 0   0
+
+999 SUSPICIOUS                              255 100 100 0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg in which the aseg cortex label is replaced
+# by the labels in the aparc. It also supports wm labels that will
+# eventually be created by mri_aparc2aseg. Otherwise, the aseg labels
+# do not change from above. The cortical labels are the same as in
+# colortable_desikan_killiany.txt, except that left hemisphere has
+# 1000 added to the index and the right has 2000 added.  The label
+# names are also prepended with ctx-lh or ctx-rh. The white matter
+# labels are the same as in colortable_desikan_killiany.txt, except
+# that left hemisphere has 3000 added to the index and the right has
+# 4000 added. The label names are also prepended with wm-lh or wm-rh.
+# Centrum semiovale is also labeled with 5001 (left) and 5002 (right).
+# Even further below are the color tables for aparc.a2005s and aparc.a2009s.
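
To make the offset scheme concrete (values taken from the tables below;
'precentral' has base index 24 in the Desikan-Killiany table):

    ctx-lh-precentral = 1000 + 24 = 1024
    ctx-rh-precentral = 2000 + 24 = 2024
    wm-lh-precentral  = 3000 + 24 = 3024
    wm-rh-precentral  = 4000 + 24 = 4024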
+
+#No.    Label Name:                         R   G   B   A
+1000    ctx-lh-unknown                      25  5   25  0
+1001    ctx-lh-bankssts                     25  100 40  0
+1002    ctx-lh-caudalanteriorcingulate      125 100 160 0
+1003    ctx-lh-caudalmiddlefrontal          100 25  0   0
+1004    ctx-lh-corpuscallosum               120 70  50  0
+1005    ctx-lh-cuneus                       220 20  100 0
+1006    ctx-lh-entorhinal                   220 20  10  0
+1007    ctx-lh-fusiform                     180 220 140 0
+1008    ctx-lh-inferiorparietal             220 60  220 0
+1009    ctx-lh-inferiortemporal             180 40  120 0
+1010    ctx-lh-isthmuscingulate             140 20  140 0
+1011    ctx-lh-lateraloccipital             20  30  140 0
+1012    ctx-lh-lateralorbitofrontal         35  75  50  0
+1013    ctx-lh-lingual                      225 140 140 0
+1014    ctx-lh-medialorbitofrontal          200 35  75  0
+1015    ctx-lh-middletemporal               160 100 50  0
+1016    ctx-lh-parahippocampal              20  220 60  0
+1017    ctx-lh-paracentral                  60  220 60  0
+1018    ctx-lh-parsopercularis              220 180 140 0
+1019    ctx-lh-parsorbitalis                20  100 50  0
+1020    ctx-lh-parstriangularis             220 60  20  0
+1021    ctx-lh-pericalcarine                120 100 60  0
+1022    ctx-lh-postcentral                  220 20  20  0
+1023    ctx-lh-posteriorcingulate           220 180 220 0
+1024    ctx-lh-precentral                   60  20  220 0
+1025    ctx-lh-precuneus                    160 140 180 0
+1026    ctx-lh-rostralanteriorcingulate     80  20  140 0
+1027    ctx-lh-rostralmiddlefrontal         75  50  125 0
+1028    ctx-lh-superiorfrontal              20  220 160 0
+1029    ctx-lh-superiorparietal             20  180 140 0
+1030    ctx-lh-superiortemporal             140 220 220 0
+1031    ctx-lh-supramarginal                80  160 20  0
+1032    ctx-lh-frontalpole                  100 0   100 0
+1033    ctx-lh-temporalpole                 70  70  70  0
+1034    ctx-lh-transversetemporal           150 150 200 0
+1035    ctx-lh-insula                       255 192 32  0
+
+2000    ctx-rh-unknown                      25  5   25  0
+2001    ctx-rh-bankssts                     25  100 40  0
+2002    ctx-rh-caudalanteriorcingulate      125 100 160 0
+2003    ctx-rh-caudalmiddlefrontal          100 25  0   0
+2004    ctx-rh-corpuscallosum               120 70  50  0
+2005    ctx-rh-cuneus                       220 20  100 0
+2006    ctx-rh-entorhinal                   220 20  10  0
+2007    ctx-rh-fusiform                     180 220 140 0
+2008    ctx-rh-inferiorparietal             220 60  220 0
+2009    ctx-rh-inferiortemporal             180 40  120 0
+2010    ctx-rh-isthmuscingulate             140 20  140 0
+2011    ctx-rh-lateraloccipital             20  30  140 0
+2012    ctx-rh-lateralorbitofrontal         35  75  50  0
+2013    ctx-rh-lingual                      225 140 140 0
+2014    ctx-rh-medialorbitofrontal          200 35  75  0
+2015    ctx-rh-middletemporal               160 100 50  0
+2016    ctx-rh-parahippocampal              20  220 60  0
+2017    ctx-rh-paracentral                  60  220 60  0
+2018    ctx-rh-parsopercularis              220 180 140 0
+2019    ctx-rh-parsorbitalis                20  100 50  0
+2020    ctx-rh-parstriangularis             220 60  20  0
+2021    ctx-rh-pericalcarine                120 100 60  0
+2022    ctx-rh-postcentral                  220 20  20  0
+2023    ctx-rh-posteriorcingulate           220 180 220 0
+2024    ctx-rh-precentral                   60  20  220 0
+2025    ctx-rh-precuneus                    160 140 180 0
+2026    ctx-rh-rostralanteriorcingulate     80  20  140 0
+2027    ctx-rh-rostralmiddlefrontal         75  50  125 0
+2028    ctx-rh-superiorfrontal              20  220 160 0
+2029    ctx-rh-superiorparietal             20  180 140 0
+2030    ctx-rh-superiortemporal             140 220 220 0
+2031    ctx-rh-supramarginal                80  160 20  0
+2032    ctx-rh-frontalpole                  100 0   100 0
+2033    ctx-rh-temporalpole                 70  70  70  0
+2034    ctx-rh-transversetemporal           150 150 200 0
+2035    ctx-rh-insula                       255 192 32  0
+
+3000    wm-lh-unknown                       230 250 230 0
+3001    wm-lh-bankssts                      230 155 215 0
+3002    wm-lh-caudalanteriorcingulate       130 155 95  0
+3003    wm-lh-caudalmiddlefrontal           155 230 255 0
+3004    wm-lh-corpuscallosum                135 185 205 0
+3005    wm-lh-cuneus                        35  235 155 0
+3006    wm-lh-entorhinal                    35  235 245 0
+3007    wm-lh-fusiform                      75  35  115 0
+3008    wm-lh-inferiorparietal              35  195 35  0
+3009    wm-lh-inferiortemporal              75  215 135 0
+3010    wm-lh-isthmuscingulate              115 235 115 0
+3011    wm-lh-lateraloccipital              235 225 115 0
+3012    wm-lh-lateralorbitofrontal          220 180 205 0
+3013    wm-lh-lingual                       30  115 115 0
+3014    wm-lh-medialorbitofrontal           55  220 180 0
+3015    wm-lh-middletemporal                95  155 205 0
+3016    wm-lh-parahippocampal               235 35  195 0
+3017    wm-lh-paracentral                   195 35  195 0
+3018    wm-lh-parsopercularis               35  75  115 0
+3019    wm-lh-parsorbitalis                 235 155 205 0
+3020    wm-lh-parstriangularis              35  195 235 0
+3021    wm-lh-pericalcarine                 135 155 195 0
+3022    wm-lh-postcentral                   35  235 235 0
+3023    wm-lh-posteriorcingulate            35  75  35  0
+3024    wm-lh-precentral                    195 235 35  0
+3025    wm-lh-precuneus                     95  115 75  0
+3026    wm-lh-rostralanteriorcingulate      175 235 115 0
+3027    wm-lh-rostralmiddlefrontal          180 205 130 0
+3028    wm-lh-superiorfrontal               235 35  95  0
+3029    wm-lh-superiorparietal              235 75  115 0
+3030    wm-lh-superiortemporal              115 35  35  0
+3031    wm-lh-supramarginal                 175 95  235 0
+3032    wm-lh-frontalpole                   155 255 155 0
+3033    wm-lh-temporalpole                  185 185 185 0
+3034    wm-lh-transversetemporal            105 105 55  0
+3035    wm-lh-insula                        254 191 31  0
+
+4000    wm-rh-unknown                       230 250 230 0
+4001    wm-rh-bankssts                      230 155 215 0
+4002    wm-rh-caudalanteriorcingulate       130 155 95  0
+4003    wm-rh-caudalmiddlefrontal           155 230 255 0
+4004    wm-rh-corpuscallosum                135 185 205 0
+4005    wm-rh-cuneus                        35  235 155 0
+4006    wm-rh-entorhinal                    35  235 245 0
+4007    wm-rh-fusiform                      75  35  115 0
+4008    wm-rh-inferiorparietal              35  195 35  0
+4009    wm-rh-inferiortemporal              75  215 135 0
+4010    wm-rh-isthmuscingulate              115 235 115 0
+4011    wm-rh-lateraloccipital              235 225 115 0
+4012    wm-rh-lateralorbitofrontal          220 180 205 0
+4013    wm-rh-lingual                       30  115 115 0
+4014    wm-rh-medialorbitofrontal           55  220 180 0
+4015    wm-rh-middletemporal                95  155 205 0
+4016    wm-rh-parahippocampal               235 35  195 0
+4017    wm-rh-paracentral                   195 35  195 0
+4018    wm-rh-parsopercularis               35  75  115 0
+4019    wm-rh-parsorbitalis                 235 155 205 0
+4020    wm-rh-parstriangularis              35  195 235 0
+4021    wm-rh-pericalcarine                 135 155 195 0
+4022    wm-rh-postcentral                   35  235 235 0
+4023    wm-rh-posteriorcingulate            35  75  35  0
+4024    wm-rh-precentral                    195 235 35  0
+4025    wm-rh-precuneus                     95  115 75  0
+4026    wm-rh-rostralanteriorcingulate      175 235 115 0
+4027    wm-rh-rostralmiddlefrontal          180 205 130 0
+4028    wm-rh-superiorfrontal               235 35  95  0
+4029    wm-rh-superiorparietal              235 75  115 0
+4030    wm-rh-superiortemporal              115 35  35  0
+4031    wm-rh-supramarginal                 175 95  235 0
+4032    wm-rh-frontalpole                   155 255 155 0
+4033    wm-rh-temporalpole                  185 185 185 0
+4034    wm-rh-transversetemporal            105 105 55  0
+4035    wm-rh-insula                        254 191 31  0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg (with --a2005s flag) in which the aseg
+# cortex label is replaced by the labels in the aparc.a2005s. The
+# cortical labels are the same as in Simple_surface_labels2005.txt,
+# except that left hemisphere has 1100 added to the index and the
+# right has 2100 added.  The label names are also prepended with
+# ctx-lh or ctx-rh.  The aparc.a2009s labels are further below
+
+#No.    Label Name:                                     R   G   B   A
+1100    ctx-lh-Unknown                                  0   0   0   0
+1101    ctx-lh-Corpus_callosum                          50  50  50  0
+1102    ctx-lh-G_and_S_Insula_ONLY_AVERAGE              180 20  30  0
+1103    ctx-lh-G_cingulate-Isthmus                      60  25  25  0
+1104    ctx-lh-G_cingulate-Main_part                    25  60  60  0
+
+1200    ctx-lh-G_cingulate-caudal_ACC                   25  60  61  0
+1201    ctx-lh-G_cingulate-rostral_ACC                  25  90  60  0
+1202    ctx-lh-G_cingulate-posterior                    25  120 60  0
+
+1205    ctx-lh-S_cingulate-caudal_ACC                   25  150 60  0
+1206    ctx-lh-S_cingulate-rostral_ACC                  25  180 60  0
+1207    ctx-lh-S_cingulate-posterior                    25  210 60  0
+
+1210    ctx-lh-S_pericallosal-caudal                    25  150 90  0
+1211    ctx-lh-S_pericallosal-rostral                   25  180 90  0
+1212    ctx-lh-S_pericallosal-posterior                 25  210 90  0
+
+1105    ctx-lh-G_cuneus                                 180 20  20  0
+1106    ctx-lh-G_frontal_inf-Opercular_part             220 20  100 0
+1107    ctx-lh-G_frontal_inf-Orbital_part               140 60  60  0
+1108    ctx-lh-G_frontal_inf-Triangular_part            180 220 140 0
+1109    ctx-lh-G_frontal_middle                         140 100 180 0
+1110    ctx-lh-G_frontal_superior                       180 20  140 0
+1111    ctx-lh-G_frontomarginal                         140 20  140 0
+1112    ctx-lh-G_insular_long                           21  10  10  0
+1113    ctx-lh-G_insular_short                          225 140 140 0
+1114    ctx-lh-G_and_S_occipital_inferior               23  60  180 0
+1115    ctx-lh-G_occipital_middle                       180 60  180 0
+1116    ctx-lh-G_occipital_superior                     20  220 60  0
+1117    ctx-lh-G_occipit-temp_lat-Or_fusiform           60  20  140 0
+1118    ctx-lh-G_occipit-temp_med-Lingual_part          220 180 140 0
+1119    ctx-lh-G_occipit-temp_med-Parahippocampal_part  65  100 20  0
+1120    ctx-lh-G_orbital                                220 60  20  0
+1121    ctx-lh-G_paracentral                            60  100 60  0
+1122    ctx-lh-G_parietal_inferior-Angular_part         20  60  220 0
+1123    ctx-lh-G_parietal_inferior-Supramarginal_part   100 100 60  0
+1124    ctx-lh-G_parietal_superior                      220 180 220 0
+1125    ctx-lh-G_postcentral                            20  180 140 0
+1126    ctx-lh-G_precentral                             60  140 180 0
+1127    ctx-lh-G_precuneus                              25  20  140 0
+1128    ctx-lh-G_rectus                                 20  60  100 0
+1129    ctx-lh-G_subcallosal                            60  220 20  0
+1130    ctx-lh-G_subcentral                             60  20  220 0
+1131    ctx-lh-G_temporal_inferior                      220 220 100 0
+1132    ctx-lh-G_temporal_middle                        180 60  60  0
+1133    ctx-lh-G_temp_sup-G_temp_transv_and_interm_S    60  60  220 0
+1134    ctx-lh-G_temp_sup-Lateral_aspect                220 60  220 0
+1135    ctx-lh-G_temp_sup-Planum_polare                 65  220 60  0
+1136    ctx-lh-G_temp_sup-Planum_tempolare              25  140 20  0
+1137    ctx-lh-G_and_S_transverse_frontopolar           13  0   250 0
+1138    ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal     61  20  220 0
+1139    ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical       61  20  60  0
+1140    ctx-lh-Lat_Fissure-post_sgt                     61  60  100 0
+1141    ctx-lh-Medial_wall                              25  25  25  0
+1142    ctx-lh-Pole_occipital                           140 20  60  0
+1143    ctx-lh-Pole_temporal                            220 180 20  0
+1144    ctx-lh-S_calcarine                              63  180 180 0
+1145    ctx-lh-S_central                                221 20  10  0
+1146    ctx-lh-S_central_insula                         21  220 20  0
+1147    ctx-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20  0
+1148    ctx-lh-S_cingulate-Marginalis_part              221 20  100 0
+1149    ctx-lh-S_circular_insula_anterior               221 60  140 0
+1150    ctx-lh-S_circular_insula_inferior               221 20  220 0
+1151    ctx-lh-S_circular_insula_superior               61  220 220 0
+1152    ctx-lh-S_collateral_transverse_ant              100 200 200 0
+1153    ctx-lh-S_collateral_transverse_post             10  200 200 0
+1154    ctx-lh-S_frontal_inferior                       221 220 20  0
+1155    ctx-lh-S_frontal_middle                         141 20  100 0
+1156    ctx-lh-S_frontal_superior                       61  220 100 0
+1157    ctx-lh-S_frontomarginal                         21  220 60  0
+1158    ctx-lh-S_intermedius_primus-Jensen              141 60  20  0
+1159    ctx-lh-S_intraparietal-and_Parietal_transverse  143 20  220 0
+1160    ctx-lh-S_occipital_anterior                     61  20  180 0
+1161    ctx-lh-S_occipital_middle_and_Lunatus           101 60  220 0
+1162    ctx-lh-S_occipital_superior_and_transversalis   21  20  140 0
+1163    ctx-lh-S_occipito-temporal_lateral              221 140 20  0
+1164    ctx-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0
+1165    ctx-lh-S_orbital-H_shapped                      101 20  20  0
+1166    ctx-lh-S_orbital_lateral                        221 100 20  0
+1167    ctx-lh-S_orbital_medial-Or_olfactory            181 200 20  0
+1168    ctx-lh-S_paracentral                            21  180 140 0
+1169    ctx-lh-S_parieto_occipital                      101 100 180 0
+1170    ctx-lh-S_pericallosal                           181 220 20  0
+1171    ctx-lh-S_postcentral                            21  140 200 0
+1172    ctx-lh-S_precentral-Inferior-part               21  20  240 0
+1173    ctx-lh-S_precentral-Superior-part               21  20  200 0
+1174    ctx-lh-S_subcentral_ant                         61  180 60  0
+1175    ctx-lh-S_subcentral_post                        61  180 250 0
+1176    ctx-lh-S_suborbital                             21  20  60  0
+1177    ctx-lh-S_subparietal                            101 60  60  0
+1178    ctx-lh-S_supracingulate                         21  220 220 0
+1179    ctx-lh-S_temporal_inferior                      21  180 180 0
+1180    ctx-lh-S_temporal_superior                      223 220 60  0
+1181    ctx-lh-S_temporal_transverse                    221 60  60  0
+
+2100    ctx-rh-Unknown                                  0   0   0   0
+2101    ctx-rh-Corpus_callosum                          50  50  50  0
+2102    ctx-rh-G_and_S_Insula_ONLY_AVERAGE              180 20  30  0
+2103    ctx-rh-G_cingulate-Isthmus                      60  25  25  0
+2104    ctx-rh-G_cingulate-Main_part                    25  60  60  0
+
+2105    ctx-rh-G_cuneus                                 180 20  20  0
+2106    ctx-rh-G_frontal_inf-Opercular_part             220 20  100 0
+2107    ctx-rh-G_frontal_inf-Orbital_part               140 60  60  0
+2108    ctx-rh-G_frontal_inf-Triangular_part            180 220 140 0
+2109    ctx-rh-G_frontal_middle                         140 100 180 0
+2110    ctx-rh-G_frontal_superior                       180 20  140 0
+2111    ctx-rh-G_frontomarginal                         140 20  140 0
+2112    ctx-rh-G_insular_long                           21  10  10  0
+2113    ctx-rh-G_insular_short                          225 140 140 0
+2114    ctx-rh-G_and_S_occipital_inferior               23  60  180 0
+2115    ctx-rh-G_occipital_middle                       180 60  180 0
+2116    ctx-rh-G_occipital_superior                     20  220 60  0
+2117    ctx-rh-G_occipit-temp_lat-Or_fusiform           60  20  140 0
+2118    ctx-rh-G_occipit-temp_med-Lingual_part          220 180 140 0
+2119    ctx-rh-G_occipit-temp_med-Parahippocampal_part  65  100 20  0
+2120    ctx-rh-G_orbital                                220 60  20  0
+2121    ctx-rh-G_paracentral                            60  100 60  0
+2122    ctx-rh-G_parietal_inferior-Angular_part         20  60  220 0
+2123    ctx-rh-G_parietal_inferior-Supramarginal_part   100 100 60  0
+2124    ctx-rh-G_parietal_superior                      220 180 220 0
+2125    ctx-rh-G_postcentral                            20  180 140 0
+2126    ctx-rh-G_precentral                             60  140 180 0
+2127    ctx-rh-G_precuneus                              25  20  140 0
+2128    ctx-rh-G_rectus                                 20  60  100 0
+2129    ctx-rh-G_subcallosal                            60  220 20  0
+2130    ctx-rh-G_subcentral                             60  20  220 0
+2131    ctx-rh-G_temporal_inferior                      220 220 100 0
+2132    ctx-rh-G_temporal_middle                        180 60  60  0
+2133    ctx-rh-G_temp_sup-G_temp_transv_and_interm_S    60  60  220 0
+2134    ctx-rh-G_temp_sup-Lateral_aspect                220 60  220 0
+2135    ctx-rh-G_temp_sup-Planum_polare                 65  220 60  0
+2136    ctx-rh-G_temp_sup-Planum_tempolare              25  140 20  0
+2137    ctx-rh-G_and_S_transverse_frontopolar           13  0   250 0
+2138    ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal     61  20  220 0
+2139    ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical       61  20  60  0
+2140    ctx-rh-Lat_Fissure-post_sgt                     61  60  100 0
+2141    ctx-rh-Medial_wall                              25  25  25  0
+2142    ctx-rh-Pole_occipital                           140 20  60  0
+2143    ctx-rh-Pole_temporal                            220 180 20  0
+2144    ctx-rh-S_calcarine                              63  180 180 0
+2145    ctx-rh-S_central                                221 20  10  0
+2146    ctx-rh-S_central_insula                         21  220 20  0
+2147    ctx-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20  0
+2148    ctx-rh-S_cingulate-Marginalis_part              221 20  100 0
+2149    ctx-rh-S_circular_insula_anterior               221 60  140 0
+2150    ctx-rh-S_circular_insula_inferior               221 20  220 0
+2151    ctx-rh-S_circular_insula_superior               61  220 220 0
+2152    ctx-rh-S_collateral_transverse_ant              100 200 200 0
+2153    ctx-rh-S_collateral_transverse_post             10  200 200 0
+2154    ctx-rh-S_frontal_inferior                       221 220 20  0
+2155    ctx-rh-S_frontal_middle                         141 20  100 0
+2156    ctx-rh-S_frontal_superior                       61  220 100 0
+2157    ctx-rh-S_frontomarginal                         21  220 60  0
+2158    ctx-rh-S_intermedius_primus-Jensen              141 60  20  0
+2159    ctx-rh-S_intraparietal-and_Parietal_transverse  143 20  220 0
+2160    ctx-rh-S_occipital_anterior                     61  20  180 0
+2161    ctx-rh-S_occipital_middle_and_Lunatus           101 60  220 0
+2162    ctx-rh-S_occipital_superior_and_transversalis   21  20  140 0
+2163    ctx-rh-S_occipito-temporal_lateral              221 140 20  0
+2164    ctx-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0
+2165    ctx-rh-S_orbital-H_shapped                      101 20  20  0
+2166    ctx-rh-S_orbital_lateral                        221 100 20  0
+2167    ctx-rh-S_orbital_medial-Or_olfactory            181 200 20  0
+2168    ctx-rh-S_paracentral                            21  180 140 0
+2169    ctx-rh-S_parieto_occipital                      101 100 180 0
+2170    ctx-rh-S_pericallosal                           181 220 20  0
+2171    ctx-rh-S_postcentral                            21  140 200 0
+2172    ctx-rh-S_precentral-Inferior-part               21  20  240 0
+2173    ctx-rh-S_precentral-Superior-part               21  20  200 0
+2174    ctx-rh-S_subcentral_ant                         61  180 60  0
+2175    ctx-rh-S_subcentral_post                        61  180 250 0
+2176    ctx-rh-S_suborbital                             21  20  60  0
+2177    ctx-rh-S_subparietal                            101 60  60  0
+2178    ctx-rh-S_supracingulate                         21  220 220 0
+2179    ctx-rh-S_temporal_inferior                      21  180 180 0
+2180    ctx-rh-S_temporal_superior                      223 220 60  0
+2181    ctx-rh-S_temporal_transverse                    221 60  60  0
+
+
+2200    ctx-rh-G_cingulate-caudal_ACC                   25  60  61  0
+2201    ctx-rh-G_cingulate-rostral_ACC                  25  90  60  0
+2202    ctx-rh-G_cingulate-posterior                    25  120 60  0
+
+2205    ctx-rh-S_cingulate-caudal_ACC                   25  150 60  0
+2206    ctx-rh-S_cingulate-rostral_ACC                  25  180 60  0
+2207    ctx-rh-S_cingulate-posterior                    25  210 60  0
+
+2210    ctx-rh-S_pericallosal-caudal                    25  150 90  0
+2211    ctx-rh-S_pericallosal-rostral                   25  180 90  0
+2212    ctx-rh-S_pericallosal-posterior                 25  210 90  0
+
+3100    wm-lh-Unknown                                   0   0   0   0
+3101    wm-lh-Corpus_callosum                           50  50  50  0
+3102    wm-lh-G_and_S_Insula_ONLY_AVERAGE               180 20  30  0
+3103    wm-lh-G_cingulate-Isthmus                       60  25  25  0
+3104    wm-lh-G_cingulate-Main_part                     25  60  60  0
+3105    wm-lh-G_cuneus                                  180 20  20  0
+3106    wm-lh-G_frontal_inf-Opercular_part              220 20  100 0
+3107    wm-lh-G_frontal_inf-Orbital_part                140 60  60  0
+3108    wm-lh-G_frontal_inf-Triangular_part             180 220 140 0
+3109    wm-lh-G_frontal_middle                          140 100 180 0
+3110    wm-lh-G_frontal_superior                        180 20  140 0
+3111    wm-lh-G_frontomarginal                          140 20  140 0
+3112    wm-lh-G_insular_long                            21  10  10  0
+3113    wm-lh-G_insular_short                           225 140 140 0
+3114    wm-lh-G_and_S_occipital_inferior                23  60  180 0
+3115    wm-lh-G_occipital_middle                        180 60  180 0
+3116    wm-lh-G_occipital_superior                      20  220 60  0
+3117    wm-lh-G_occipit-temp_lat-Or_fusiform            60  20  140 0
+3118    wm-lh-G_occipit-temp_med-Lingual_part           220 180 140 0
+3119    wm-lh-G_occipit-temp_med-Parahippocampal_part   65  100 20  0
+3120    wm-lh-G_orbital                                 220 60  20  0
+3121    wm-lh-G_paracentral                             60  100 60  0
+3122    wm-lh-G_parietal_inferior-Angular_part          20  60  220 0
+3123    wm-lh-G_parietal_inferior-Supramarginal_part    100 100 60  0
+3124    wm-lh-G_parietal_superior                       220 180 220 0
+3125    wm-lh-G_postcentral                             20  180 140 0
+3126    wm-lh-G_precentral                              60  140 180 0
+3127    wm-lh-G_precuneus                               25  20  140 0
+3128    wm-lh-G_rectus                                  20  60  100 0
+3129    wm-lh-G_subcallosal                             60  220 20  0
+3130    wm-lh-G_subcentral                              60  20  220 0
+3131    wm-lh-G_temporal_inferior                       220 220 100 0
+3132    wm-lh-G_temporal_middle                         180 60  60  0
+3133    wm-lh-G_temp_sup-G_temp_transv_and_interm_S     60  60  220 0
+3134    wm-lh-G_temp_sup-Lateral_aspect                 220 60  220 0
+3135    wm-lh-G_temp_sup-Planum_polare                  65  220 60  0
+3136    wm-lh-G_temp_sup-Planum_tempolare               25  140 20  0
+3137    wm-lh-G_and_S_transverse_frontopolar            13  0   250 0
+3138    wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal      61  20  220 0
+3139    wm-lh-Lat_Fissure-ant_sgt-ramus_vertical        61  20  60  0
+3140    wm-lh-Lat_Fissure-post_sgt                      61  60  100 0
+3141    wm-lh-Medial_wall                               25  25  25  0
+3142    wm-lh-Pole_occipital                            140 20  60  0
+3143    wm-lh-Pole_temporal                             220 180 20  0
+3144    wm-lh-S_calcarine                               63  180 180 0
+3145    wm-lh-S_central                                 221 20  10  0
+3146    wm-lh-S_central_insula                          21  220 20  0
+3147    wm-lh-S_cingulate-Main_part_and_Intracingulate  183 100 20  0
+3148    wm-lh-S_cingulate-Marginalis_part               221 20  100 0
+3149    wm-lh-S_circular_insula_anterior                221 60  140 0
+3150    wm-lh-S_circular_insula_inferior                221 20  220 0
+3151    wm-lh-S_circular_insula_superior                61  220 220 0
+3152    wm-lh-S_collateral_transverse_ant               100 200 200 0
+3153    wm-lh-S_collateral_transverse_post              10  200 200 0
+3154    wm-lh-S_frontal_inferior                        221 220 20  0
+3155    wm-lh-S_frontal_middle                          141 20  100 0
+3156    wm-lh-S_frontal_superior                        61  220 100 0
+3157    wm-lh-S_frontomarginal                          21  220 60  0
+3158    wm-lh-S_intermedius_primus-Jensen               141 60  20  0
+3159    wm-lh-S_intraparietal-and_Parietal_transverse   143 20  220 0
+3160    wm-lh-S_occipital_anterior                      61  20  180 0
+3161    wm-lh-S_occipital_middle_and_Lunatus            101 60  220 0
+3162    wm-lh-S_occipital_superior_and_transversalis    21  20  140 0
+3163    wm-lh-S_occipito-temporal_lateral               221 140 20  0
+3164    wm-lh-S_occipito-temporal_medial_and_S_Lingual  141 100 220 0
+3165    wm-lh-S_orbital-H_shapped                       101 20  20  0
+3166    wm-lh-S_orbital_lateral                         221 100 20  0
+3167    wm-lh-S_orbital_medial-Or_olfactory             181 200 20  0
+3168    wm-lh-S_paracentral                             21  180 140 0
+3169    wm-lh-S_parieto_occipital                       101 100 180 0
+3170    wm-lh-S_pericallosal                            181 220 20  0
+3171    wm-lh-S_postcentral                             21  140 200 0
+3172    wm-lh-S_precentral-Inferior-part                21  20  240 0
+3173    wm-lh-S_precentral-Superior-part                21  20  200 0
+3174    wm-lh-S_subcentral_ant                          61  180 60  0
+3175    wm-lh-S_subcentral_post                         61  180 250 0
+3176    wm-lh-S_suborbital                              21  20  60  0
+3177    wm-lh-S_subparietal                             101 60  60  0
+3178    wm-lh-S_supracingulate                          21  220 220 0
+3179    wm-lh-S_temporal_inferior                       21  180 180 0
+3180    wm-lh-S_temporal_superior                       223 220 60  0
+3181    wm-lh-S_temporal_transverse                     221 60  60  0
+
+4100    wm-rh-Unknown                                   0   0   0   0
+4101    wm-rh-Corpus_callosum                           50  50  50  0
+4102    wm-rh-G_and_S_Insula_ONLY_AVERAGE               180 20  30  0
+4103    wm-rh-G_cingulate-Isthmus                       60  25  25  0
+4104    wm-rh-G_cingulate-Main_part                     25  60  60  0
+4105    wm-rh-G_cuneus                                  180 20  20  0
+4106    wm-rh-G_frontal_inf-Opercular_part              220 20  100 0
+4107    wm-rh-G_frontal_inf-Orbital_part                140 60  60  0
+4108    wm-rh-G_frontal_inf-Triangular_part             180 220 140 0
+4109    wm-rh-G_frontal_middle                          140 100 180 0
+4110    wm-rh-G_frontal_superior                        180 20  140 0
+4111    wm-rh-G_frontomarginal                          140 20  140 0
+4112    wm-rh-G_insular_long                            21  10  10  0
+4113    wm-rh-G_insular_short                           225 140 140 0
+4114    wm-rh-G_and_S_occipital_inferior                23  60  180 0
+4115    wm-rh-G_occipital_middle                        180 60  180 0
+4116    wm-rh-G_occipital_superior                      20  220 60  0
+4117    wm-rh-G_occipit-temp_lat-Or_fusiform            60  20  140 0
+4118    wm-rh-G_occipit-temp_med-Lingual_part           220 180 140 0
+4119    wm-rh-G_occipit-temp_med-Parahippocampal_part   65  100 20  0
+4120    wm-rh-G_orbital                                 220 60  20  0
+4121    wm-rh-G_paracentral                             60  100 60  0
+4122    wm-rh-G_parietal_inferior-Angular_part          20  60  220 0
+4123    wm-rh-G_parietal_inferior-Supramarginal_part    100 100 60  0
+4124    wm-rh-G_parietal_superior                       220 180 220 0
+4125    wm-rh-G_postcentral                             20  180 140 0
+4126    wm-rh-G_precentral                              60  140 180 0
+4127    wm-rh-G_precuneus                               25  20  140 0
+4128    wm-rh-G_rectus                                  20  60  100 0
+4129    wm-rh-G_subcallosal                             60  220 20  0
+4130    wm-rh-G_subcentral                              60  20  220 0
+4131    wm-rh-G_temporal_inferior                       220 220 100 0
+4132    wm-rh-G_temporal_middle                         180 60  60  0
+4133    wm-rh-G_temp_sup-G_temp_transv_and_interm_S     60  60  220 0
+4134    wm-rh-G_temp_sup-Lateral_aspect                 220 60  220 0
+4135    wm-rh-G_temp_sup-Planum_polare                  65  220 60  0
+4136    wm-rh-G_temp_sup-Planum_tempolare               25  140 20  0
+4137    wm-rh-G_and_S_transverse_frontopolar            13  0   250 0
+4138    wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal      61  20  220 0
+4139    wm-rh-Lat_Fissure-ant_sgt-ramus_vertical        61  20  60  0
+4140    wm-rh-Lat_Fissure-post_sgt                      61  60  100 0
+4141    wm-rh-Medial_wall                               25  25  25  0
+4142    wm-rh-Pole_occipital                            140 20  60  0
+4143    wm-rh-Pole_temporal                             220 180 20  0
+4144    wm-rh-S_calcarine                               63  180 180 0
+4145    wm-rh-S_central                                 221 20  10  0
+4146    wm-rh-S_central_insula                          21  220 20  0
+4147    wm-rh-S_cingulate-Main_part_and_Intracingulate  183 100 20  0
+4148    wm-rh-S_cingulate-Marginalis_part               221 20  100 0
+4149    wm-rh-S_circular_insula_anterior                221 60  140 0
+4150    wm-rh-S_circular_insula_inferior                221 20  220 0
+4151    wm-rh-S_circular_insula_superior                61  220 220 0
+4152    wm-rh-S_collateral_transverse_ant               100 200 200 0
+4153    wm-rh-S_collateral_transverse_post              10  200 200 0
+4154    wm-rh-S_frontal_inferior                        221 220 20  0
+4155    wm-rh-S_frontal_middle                          141 20  100 0
+4156    wm-rh-S_frontal_superior                        61  220 100 0
+4157    wm-rh-S_frontomarginal                          21  220 60  0
+4158    wm-rh-S_intermedius_primus-Jensen               141 60  20  0
+4159    wm-rh-S_intraparietal-and_Parietal_transverse   143 20  220 0
+4160    wm-rh-S_occipital_anterior                      61  20  180 0
+4161    wm-rh-S_occipital_middle_and_Lunatus            101 60  220 0
+4162    wm-rh-S_occipital_superior_and_transversalis    21  20  140 0
+4163    wm-rh-S_occipito-temporal_lateral               221 140 20  0
+4164    wm-rh-S_occipito-temporal_medial_and_S_Lingual  141 100 220 0
+4165    wm-rh-S_orbital-H_shapped                       101 20  20  0
+4166    wm-rh-S_orbital_lateral                         221 100 20  0
+4167    wm-rh-S_orbital_medial-Or_olfactory             181 200 20  0
+4168    wm-rh-S_paracentral                             21  180 140 0
+4169    wm-rh-S_parieto_occipital                       101 100 180 0
+4170    wm-rh-S_pericallosal                            181 220 20  0
+4171    wm-rh-S_postcentral                             21  140 200 0
+4172    wm-rh-S_precentral-Inferior-part                21  20  240 0
+4173    wm-rh-S_precentral-Superior-part                21  20  200 0
+4174    wm-rh-S_subcentral_ant                          61  180 60  0
+4175    wm-rh-S_subcentral_post                         61  180 250 0
+4176    wm-rh-S_suborbital                              21  20  60  0
+4177    wm-rh-S_subparietal                             101 60  60  0
+4178    wm-rh-S_supracingulate                          21  220 220 0
+4179    wm-rh-S_temporal_inferior                       21  180 180 0
+4180    wm-rh-S_temporal_superior                       223 220 60  0
+4181    wm-rh-S_temporal_transverse                     221 60  60  0
+
+5001    Left-UnsegmentedWhiteMatter                     20  30  40  0
+5002    Right-UnsegmentedWhiteMatter                    20  30  40  0
+
+# Below is the color table for white-matter pathways produced by dmri_paths
+
+#No.   Label Name:                                      R   G   B   A
+#
+5100   fmajor                                           204 102 102 0
+5101   fminor                                           204 102 102 0
+#
+5102   lh.atr                                           255 255 102 0
+5103   lh.cab                                           153 204 0   0
+5104   lh.ccg                                           0   153 153 0
+5105   lh.cst                                           204 153 255 0
+5106   lh.ilf                                           255 153 51  0
+5107   lh.slfp                                          204 204 204 0
+5108   lh.slft                                          153 255 255 0
+5109   lh.unc                                           102 153 255 0
+#
+5110   rh.atr                                           255 255 102 0
+5111   rh.cab                                           153 204 0   0
+5112   rh.ccg                                           0   153 153 0
+5113   rh.cst                                           204 153 255 0
+5114   rh.ilf                                           255 153 51  0
+5115   rh.slfp                                          204 204 204 0
+5116   rh.slft                                          153 255 255 0
+5117   rh.unc                                           102 153 255 0
+
+# These are the same tracula labels as above in human-readable form
+5200   CC-ForcepsMajor                                  204 102 102 0
+5201   CC-ForcepsMinor                                  204 102 102 0
+5202   LAntThalRadiation                                255 255 102 0
+5203   LCingulumAngBundle                               153 204 0   0
+5204   LCingulumCingGyrus                               0   153 153 0
+5205   LCorticospinalTract                              204 153 255 0
+5206   LInfLongFas                                      255 153 51  0
+5207   LSupLongFasParietal                              204 204 204 0
+5208   LSupLongFasTemporal                              153 255 255 0
+5209   LUncinateFas                                     102 153 255 0
+5210   RAntThalRadiation                                255 255 102 0
+5211   RCingulumAngBundle                               153 204 0   0
+5212   RCingulumCingGyrus                               0   153 153 0
+5213   RCorticospinalTract                              204 153 255 0
+5214   RInfLongFas                                      255 153 51  0
+5215   RSupLongFasParietal                              204 204 204 0
+5216   RSupLongFasTemporal                              153 255 255 0
+5217   RUncinateFas                                     102 153 255 0
+
+########################################
+
+6000   CST-orig                                         0   255 0   0
+6001   CST-hammer                                       255 255 0   0
+6002   CST-CVS                                          0   255 255 0
+6003   CST-flirt                                        0   0   255 0
+
+6010   Left-SLF1                                        236 16  231 0
+6020   Right-SLF1                                       237 18  232 0
+
+6030   Left-SLF3                                        236 13  227 0
+6040   Right-SLF3                                       236 17  228 0
+
+6050   Left-CST                                         1   255 1   0
+6060   Right-CST                                        2   255 1   0
+
+6070   Left-SLF2                                        236 14  230 0
+6080   Right-SLF2                                       237 14  230 0
+
+#No.  Label Name:                                       R   G   B   A
+
+7001  Lateral-nucleus                                   72  132 181 0
+7002  Basolateral-nucleus                               243 243 243 0
+7003  Basal-nucleus                                     207 63  79  0
+7004  Centromedial-nucleus                              121 20  135 0
+7005  Central-nucleus                                   197 60  248 0
+7006  Medial-nucleus                                    2   149 2   0
+7007  Cortical-nucleus                                  221 249 166 0
+7008  Accessory-Basal-nucleus                           232 146 35  0
+7009  Corticoamygdaloid-transitio                       20  60  120 0
+7010  Anterior-amygdaloid-area-AAA                      250 250 0   0
+7011  Fusion-amygdala-HP-FAH                            122 187 222 0
+7012  Hippocampal-amygdala-transition-HATA              237 12  177 0
+7013  Endopiriform-nucleus                              10  49  255 0
+7014  Lateral-nucleus-olfactory-tract                   205 184 144 0
+7015  Paralaminar-nucleus                               45  205 165 0
+7016  Intercalated-nucleus                              117 160 175 0
+7017  Prepiriform-cortex                                221 217 21  0
+7018  Periamygdaloid-cortex                             20  60  120 0
+7019  Envelope-Amygdala                                 141 21  100 0
+7020  Extranuclear-Amydala                              225 140 141 0
+
+7100  Brainstem-inferior-colliculus                     42  201 168 0
+7101  Brainstem-cochlear-nucleus                        168 104 162 0
+
+8001  Thalamus-Anterior                                 74  130 181 0
+8002  Thalamus-Ventral-anterior                         242 241 240 0
+8003  Thalamus-Lateral-dorsal                           206 65  78  0
+8004  Thalamus-Lateral-posterior                        120 21  133 0
+8005  Thalamus-Ventral-lateral                          195 61  246 0
+8006  Thalamus-Ventral-posterior-medial                 3   147 6   0
+8007  Thalamus-Ventral-posterior-lateral                220 251 163 0
+8008  Thalamus-intralaminar                             232 146 33  0
+8009  Thalamus-centromedian                             4   114 14  0
+8010  Thalamus-mediodorsal                              121 184 220 0
+8011  Thalamus-medial                                   235 11  175 0
+8012  Thalamus-pulvinar                                 12  46  250 0
+8013  Thalamus-lateral-geniculate                       203 182 143 0
+8014  Thalamus-medial-geniculate                        42  204 167 0
+
+#
+# Labels for thalamus parcellation using probabilistic tractography. See:
+# Functional--Anatomical Validation and Individual Variation of Diffusion
+# Tractography-based Segmentation of the Human Thalamus; Cerebral Cortex
+# January 2005;15:31--39, doi:10.1093/cercor/bhh105, Advance Access
+# publication July 6, 2004
+#
+
+#No.    Label Name:                         R   G   B   A
+9000    ctx-lh-prefrontal                   30  5   30  0
+9001    ctx-lh-primary-motor                30  100 45  0
+9002    ctx-lh-premotor                     130 100 165 0
+9003    ctx-lh-temporal                     105 25  5   0
+9004    ctx-lh-posterior-parietal           125 70  55  0
+9005    ctx-lh-prim-sec-somatosensory       225 20  105 0
+9006    ctx-lh-occipital                    225 20  15  0
+
+9500    ctx-rh-prefrontal                   30  55  30  0
+9501    ctx-rh-primary-motor                30  150 45  0
+9502    ctx-rh-premotor                     130 150 165 0
+9503    ctx-rh-temporal                     105 75  5   0
+9504    ctx-rh-posterior-parietal           125 120 55  0
+9505    ctx-rh-prim-sec-somatosensory       225 70  105 0
+9506    ctx-rh-occipital                    225 70  15  0
+
+# Below is the color table for the cortical labels of the seg volume
+# created by mri_aparc2aseg (with --a2009s flag) in which the aseg
+# cortex label is replaced by the labels in the aparc.a2009s. The
+# cortical labels are the same as in Simple_surface_labels2009.txt,
+# except that left hemisphere has 11100 added to the index and the
+# right has 12100 added.  The label names are also prepended with
+# ctx_lh_, ctx_rh_, wm_lh_ and wm_rh_ (note usage of _ instead of -
+# to differentiate from a2005s labels).
+
+#No.   Label Name:                              R   G   B   A
+11100  ctx_lh_Unknown                           0   0   0   0
+11101  ctx_lh_G_and_S_frontomargin             23 220  60   0
+11102  ctx_lh_G_and_S_occipital_inf            23  60 180   0
+11103  ctx_lh_G_and_S_paracentral              63 100  60   0
+11104  ctx_lh_G_and_S_subcentral               63  20 220   0
+11105  ctx_lh_G_and_S_transv_frontopol         13   0 250   0
+11106  ctx_lh_G_and_S_cingul-Ant               26  60   0   0
+11107  ctx_lh_G_and_S_cingul-Mid-Ant           26  60  75   0
+11108  ctx_lh_G_and_S_cingul-Mid-Post          26  60 150   0
+11109  ctx_lh_G_cingul-Post-dorsal             25  60 250   0
+11110  ctx_lh_G_cingul-Post-ventral            60  25  25   0
+11111  ctx_lh_G_cuneus                        180  20  20   0
+11112  ctx_lh_G_front_inf-Opercular           220  20 100   0
+11113  ctx_lh_G_front_inf-Orbital             140  60  60   0
+11114  ctx_lh_G_front_inf-Triangul            180 220 140   0
+11115  ctx_lh_G_front_middle                  140 100 180   0
+11116  ctx_lh_G_front_sup                     180  20 140   0
+11117  ctx_lh_G_Ins_lg_and_S_cent_ins          23  10  10   0
+11118  ctx_lh_G_insular_short                 225 140 140   0
+11119  ctx_lh_G_occipital_middle              180  60 180   0
+11120  ctx_lh_G_occipital_sup                  20 220  60   0
+11121  ctx_lh_G_oc-temp_lat-fusifor            60  20 140   0
+11122  ctx_lh_G_oc-temp_med-Lingual           220 180 140   0
+11123  ctx_lh_G_oc-temp_med-Parahip            65 100  20   0
+11124  ctx_lh_G_orbital                       220  60  20   0
+11125  ctx_lh_G_pariet_inf-Angular             20  60 220   0
+11126  ctx_lh_G_pariet_inf-Supramar           100 100  60   0
+11127  ctx_lh_G_parietal_sup                  220 180 220   0
+11128  ctx_lh_G_postcentral                    20 180 140   0
+11129  ctx_lh_G_precentral                     60 140 180   0
+11130  ctx_lh_G_precuneus                      25  20 140   0
+11131  ctx_lh_G_rectus                         20  60 100   0
+11132  ctx_lh_G_subcallosal                    60 220  20   0
+11133  ctx_lh_G_temp_sup-G_T_transv            60  60 220   0
+11134  ctx_lh_G_temp_sup-Lateral              220  60 220   0
+11135  ctx_lh_G_temp_sup-Plan_polar            65 220  60   0
+11136  ctx_lh_G_temp_sup-Plan_tempo            25 140  20   0
+11137  ctx_lh_G_temporal_inf                  220 220 100   0
+11138  ctx_lh_G_temporal_middle               180  60  60   0
+11139  ctx_lh_Lat_Fis-ant-Horizont             61  20 220   0
+11140  ctx_lh_Lat_Fis-ant-Vertical             61  20  60   0
+11141  ctx_lh_Lat_Fis-post                     61  60 100   0
+11142  ctx_lh_Medial_wall                      25  25  25   0
+11143  ctx_lh_Pole_occipital                  140  20  60   0
+11144  ctx_lh_Pole_temporal                   220 180  20   0
+11145  ctx_lh_S_calcarine                      63 180 180   0
+11146  ctx_lh_S_central                       221  20  10   0
+11147  ctx_lh_S_cingul-Marginalis             221  20 100   0
+11148  ctx_lh_S_circular_insula_ant           221  60 140   0
+11149  ctx_lh_S_circular_insula_inf           221  20 220   0
+11150  ctx_lh_S_circular_insula_sup            61 220 220   0
+11151  ctx_lh_S_collat_transv_ant             100 200 200   0
+11152  ctx_lh_S_collat_transv_post             10 200 200   0
+11153  ctx_lh_S_front_inf                     221 220  20   0
+11154  ctx_lh_S_front_middle                  141  20 100   0
+11155  ctx_lh_S_front_sup                      61 220 100   0
+11156  ctx_lh_S_interm_prim-Jensen            141  60  20   0
+11157  ctx_lh_S_intrapariet_and_P_trans       143  20 220   0
+11158  ctx_lh_S_oc_middle_and_Lunatus         101  60 220   0
+11159  ctx_lh_S_oc_sup_and_transversal         21  20 140   0
+11160  ctx_lh_S_occipital_ant                  61  20 180   0
+11161  ctx_lh_S_oc-temp_lat                   221 140  20   0
+11162  ctx_lh_S_oc-temp_med_and_Lingual       141 100 220   0
+11163  ctx_lh_S_orbital_lateral               221 100  20   0
+11164  ctx_lh_S_orbital_med-olfact            181 200  20   0
+11165  ctx_lh_S_orbital-H_Shaped              101  20  20   0
+11166  ctx_lh_S_parieto_occipital             101 100 180   0
+11167  ctx_lh_S_pericallosal                  181 220  20   0
+11168  ctx_lh_S_postcentral                    21 140 200   0
+11169  ctx_lh_S_precentral-inf-part            21  20 240   0
+11170  ctx_lh_S_precentral-sup-part            21  20 200   0
+11171  ctx_lh_S_suborbital                     21  20  60   0
+11172  ctx_lh_S_subparietal                   101  60  60   0
+11173  ctx_lh_S_temporal_inf                   21 180 180   0
+11174  ctx_lh_S_temporal_sup                  223 220  60   0
+11175  ctx_lh_S_temporal_transverse           221  60  60   0
+
+12100  ctx_rh_Unknown                           0   0   0   0
+12101  ctx_rh_G_and_S_frontomargin             23 220  60   0
+12102  ctx_rh_G_and_S_occipital_inf            23  60 180   0
+12103  ctx_rh_G_and_S_paracentral              63 100  60   0
+12104  ctx_rh_G_and_S_subcentral               63  20 220   0
+12105  ctx_rh_G_and_S_transv_frontopol         13   0 250   0
+12106  ctx_rh_G_and_S_cingul-Ant               26  60   0   0
+12107  ctx_rh_G_and_S_cingul-Mid-Ant           26  60  75   0
+12108  ctx_rh_G_and_S_cingul-Mid-Post          26  60 150   0
+12109  ctx_rh_G_cingul-Post-dorsal             25  60 250   0
+12110  ctx_rh_G_cingul-Post-ventral            60  25  25   0
+12111  ctx_rh_G_cuneus                        180  20  20   0
+12112  ctx_rh_G_front_inf-Opercular           220  20 100   0
+12113  ctx_rh_G_front_inf-Orbital             140  60  60   0
+12114  ctx_rh_G_front_inf-Triangul            180 220 140   0
+12115  ctx_rh_G_front_middle                  140 100 180   0
+12116  ctx_rh_G_front_sup                     180  20 140   0
+12117  ctx_rh_G_Ins_lg_and_S_cent_ins          23  10  10   0
+12118  ctx_rh_G_insular_short                 225 140 140   0
+12119  ctx_rh_G_occipital_middle              180  60 180   0
+12120  ctx_rh_G_occipital_sup                  20 220  60   0
+12121  ctx_rh_G_oc-temp_lat-fusifor            60  20 140   0
+12122  ctx_rh_G_oc-temp_med-Lingual           220 180 140   0
+12123  ctx_rh_G_oc-temp_med-Parahip            65 100  20   0
+12124  ctx_rh_G_orbital                       220  60  20   0
+12125  ctx_rh_G_pariet_inf-Angular             20  60 220   0
+12126  ctx_rh_G_pariet_inf-Supramar           100 100  60   0
+12127  ctx_rh_G_parietal_sup                  220 180 220   0
+12128  ctx_rh_G_postcentral                    20 180 140   0
+12129  ctx_rh_G_precentral                     60 140 180   0
+12130  ctx_rh_G_precuneus                      25  20 140   0
+12131  ctx_rh_G_rectus                         20  60 100   0
+12132  ctx_rh_G_subcallosal                    60 220  20   0
+12133  ctx_rh_G_temp_sup-G_T_transv            60  60 220   0
+12134  ctx_rh_G_temp_sup-Lateral              220  60 220   0
+12135  ctx_rh_G_temp_sup-Plan_polar            65 220  60   0
+12136  ctx_rh_G_temp_sup-Plan_tempo            25 140  20   0
+12137  ctx_rh_G_temporal_inf                  220 220 100   0
+12138  ctx_rh_G_temporal_middle               180  60  60   0
+12139  ctx_rh_Lat_Fis-ant-Horizont             61  20 220   0
+12140  ctx_rh_Lat_Fis-ant-Vertical             61  20  60   0
+12141  ctx_rh_Lat_Fis-post                     61  60 100   0
+12142  ctx_rh_Medial_wall                      25  25  25   0
+12143  ctx_rh_Pole_occipital                  140  20  60   0
+12144  ctx_rh_Pole_temporal                   220 180  20   0
+12145  ctx_rh_S_calcarine                      63 180 180   0
+12146  ctx_rh_S_central                       221  20  10   0
+12147  ctx_rh_S_cingul-Marginalis             221  20 100   0
+12148  ctx_rh_S_circular_insula_ant           221  60 140   0
+12149  ctx_rh_S_circular_insula_inf           221  20 220   0
+12150  ctx_rh_S_circular_insula_sup            61 220 220   0
+12151  ctx_rh_S_collat_transv_ant             100 200 200   0
+12152  ctx_rh_S_collat_transv_post             10 200 200   0
+12153  ctx_rh_S_front_inf                     221 220  20   0
+12154  ctx_rh_S_front_middle                  141  20 100   0
+12155  ctx_rh_S_front_sup                      61 220 100   0
+12156  ctx_rh_S_interm_prim-Jensen            141  60  20   0
+12157  ctx_rh_S_intrapariet_and_P_trans       143  20 220   0
+12158  ctx_rh_S_oc_middle_and_Lunatus         101  60 220   0
+12159  ctx_rh_S_oc_sup_and_transversal         21  20 140   0
+12160  ctx_rh_S_occipital_ant                  61  20 180   0
+12161  ctx_rh_S_oc-temp_lat                   221 140  20   0
+12162  ctx_rh_S_oc-temp_med_and_Lingual       141 100 220   0
+12163  ctx_rh_S_orbital_lateral               221 100  20   0
+12164  ctx_rh_S_orbital_med-olfact            181 200  20   0
+12165  ctx_rh_S_orbital-H_Shaped              101  20  20   0
+12166  ctx_rh_S_parieto_occipital             101 100 180   0
+12167  ctx_rh_S_pericallosal                  181 220  20   0
+12168  ctx_rh_S_postcentral                    21 140 200   0
+12169  ctx_rh_S_precentral-inf-part            21  20 240   0
+12170  ctx_rh_S_precentral-sup-part            21  20 200   0
+12171  ctx_rh_S_suborbital                     21  20  60   0
+12172  ctx_rh_S_subparietal                   101  60  60   0
+12173  ctx_rh_S_temporal_inf                   21 180 180   0
+12174  ctx_rh_S_temporal_sup                  223 220  60   0
+12175  ctx_rh_S_temporal_transverse           221  60  60   0
+
+#No.   Label Name:                              R   G   B   A
+13100  wm_lh_Unknown                            0   0   0   0
+13101  wm_lh_G_and_S_frontomargin              23 220  60   0
+13102  wm_lh_G_and_S_occipital_inf             23  60 180   0
+13103  wm_lh_G_and_S_paracentral               63 100  60   0
+13104  wm_lh_G_and_S_subcentral                63  20 220   0
+13105  wm_lh_G_and_S_transv_frontopol          13   0 250   0
+13106  wm_lh_G_and_S_cingul-Ant                26  60   0   0
+13107  wm_lh_G_and_S_cingul-Mid-Ant            26  60  75   0
+13108  wm_lh_G_and_S_cingul-Mid-Post           26  60 150   0
+13109  wm_lh_G_cingul-Post-dorsal              25  60 250   0
+13110  wm_lh_G_cingul-Post-ventral             60  25  25   0
+13111  wm_lh_G_cuneus                         180  20  20   0
+13112  wm_lh_G_front_inf-Opercular            220  20 100   0
+13113  wm_lh_G_front_inf-Orbital              140  60  60   0
+13114  wm_lh_G_front_inf-Triangul             180 220 140   0
+13115  wm_lh_G_front_middle                   140 100 180   0
+13116  wm_lh_G_front_sup                      180  20 140   0
+13117  wm_lh_G_Ins_lg_and_S_cent_ins           23  10  10   0
+13118  wm_lh_G_insular_short                  225 140 140   0
+13119  wm_lh_G_occipital_middle               180  60 180   0
+13120  wm_lh_G_occipital_sup                   20 220  60   0
+13121  wm_lh_G_oc-temp_lat-fusifor             60  20 140   0
+13122  wm_lh_G_oc-temp_med-Lingual            220 180 140   0
+13123  wm_lh_G_oc-temp_med-Parahip             65 100  20   0
+13124  wm_lh_G_orbital                        220  60  20   0
+13125  wm_lh_G_pariet_inf-Angular              20  60 220   0
+13126  wm_lh_G_pariet_inf-Supramar            100 100  60   0
+13127  wm_lh_G_parietal_sup                   220 180 220   0
+13128  wm_lh_G_postcentral                     20 180 140   0
+13129  wm_lh_G_precentral                      60 140 180   0
+13130  wm_lh_G_precuneus                       25  20 140   0
+13131  wm_lh_G_rectus                          20  60 100   0
+13132  wm_lh_G_subcallosal                     60 220  20   0
+13133  wm_lh_G_temp_sup-G_T_transv             60  60 220   0
+13134  wm_lh_G_temp_sup-Lateral               220  60 220   0
+13135  wm_lh_G_temp_sup-Plan_polar             65 220  60   0
+13136  wm_lh_G_temp_sup-Plan_tempo             25 140  20   0
+13137  wm_lh_G_temporal_inf                   220 220 100   0
+13138  wm_lh_G_temporal_middle                180  60  60   0
+13139  wm_lh_Lat_Fis-ant-Horizont              61  20 220   0
+13140  wm_lh_Lat_Fis-ant-Vertical              61  20  60   0
+13141  wm_lh_Lat_Fis-post                      61  60 100   0
+13142  wm_lh_Medial_wall                       25  25  25   0
+13143  wm_lh_Pole_occipital                   140  20  60   0
+13144  wm_lh_Pole_temporal                    220 180  20   0
+13145  wm_lh_S_calcarine                       63 180 180   0
+13146  wm_lh_S_central                        221  20  10   0
+13147  wm_lh_S_cingul-Marginalis              221  20 100   0
+13148  wm_lh_S_circular_insula_ant            221  60 140   0
+13149  wm_lh_S_circular_insula_inf            221  20 220   0
+13150  wm_lh_S_circular_insula_sup             61 220 220   0
+13151  wm_lh_S_collat_transv_ant              100 200 200   0
+13152  wm_lh_S_collat_transv_post              10 200 200   0
+13153  wm_lh_S_front_inf                      221 220  20   0
+13154  wm_lh_S_front_middle                   141  20 100   0
+13155  wm_lh_S_front_sup                       61 220 100   0
+13156  wm_lh_S_interm_prim-Jensen             141  60  20   0
+13157  wm_lh_S_intrapariet_and_P_trans        143  20 220   0
+13158  wm_lh_S_oc_middle_and_Lunatus          101  60 220   0
+13159  wm_lh_S_oc_sup_and_transversal          21  20 140   0
+13160  wm_lh_S_occipital_ant                   61  20 180   0
+13161  wm_lh_S_oc-temp_lat                    221 140  20   0
+13162  wm_lh_S_oc-temp_med_and_Lingual        141 100 220   0
+13163  wm_lh_S_orbital_lateral                221 100  20   0
+13164  wm_lh_S_orbital_med-olfact             181 200  20   0
+13165  wm_lh_S_orbital-H_Shaped               101  20  20   0
+13166  wm_lh_S_parieto_occipital              101 100 180   0
+13167  wm_lh_S_pericallosal                   181 220  20   0
+13168  wm_lh_S_postcentral                     21 140 200   0
+13169  wm_lh_S_precentral-inf-part             21  20 240   0
+13170  wm_lh_S_precentral-sup-part             21  20 200   0
+13171  wm_lh_S_suborbital                      21  20  60   0
+13172  wm_lh_S_subparietal                    101  60  60   0
+13173  wm_lh_S_temporal_inf                    21 180 180   0
+13174  wm_lh_S_temporal_sup                   223 220  60   0
+13175  wm_lh_S_temporal_transverse            221  60  60   0
+
+14100  wm_rh_Unknown                            0   0   0   0
+14101  wm_rh_G_and_S_frontomargin              23 220  60   0
+14102  wm_rh_G_and_S_occipital_inf             23  60 180   0
+14103  wm_rh_G_and_S_paracentral               63 100  60   0
+14104  wm_rh_G_and_S_subcentral                63  20 220   0
+14105  wm_rh_G_and_S_transv_frontopol          13   0 250   0
+14106  wm_rh_G_and_S_cingul-Ant                26  60   0   0
+14107  wm_rh_G_and_S_cingul-Mid-Ant            26  60  75   0
+14108  wm_rh_G_and_S_cingul-Mid-Post           26  60 150   0
+14109  wm_rh_G_cingul-Post-dorsal              25  60 250   0
+14110  wm_rh_G_cingul-Post-ventral             60  25  25   0
+14111  wm_rh_G_cuneus                         180  20  20   0
+14112  wm_rh_G_front_inf-Opercular            220  20 100   0
+14113  wm_rh_G_front_inf-Orbital              140  60  60   0
+14114  wm_rh_G_front_inf-Triangul             180 220 140   0
+14115  wm_rh_G_front_middle                   140 100 180   0
+14116  wm_rh_G_front_sup                      180  20 140   0
+14117  wm_rh_G_Ins_lg_and_S_cent_ins           23  10  10   0
+14118  wm_rh_G_insular_short                  225 140 140   0
+14119  wm_rh_G_occipital_middle               180  60 180   0
+14120  wm_rh_G_occipital_sup                   20 220  60   0
+14121  wm_rh_G_oc-temp_lat-fusifor             60  20 140   0
+14122  wm_rh_G_oc-temp_med-Lingual            220 180 140   0
+14123  wm_rh_G_oc-temp_med-Parahip             65 100  20   0
+14124  wm_rh_G_orbital                        220  60  20   0
+14125  wm_rh_G_pariet_inf-Angular              20  60 220   0
+14126  wm_rh_G_pariet_inf-Supramar            100 100  60   0
+14127  wm_rh_G_parietal_sup                   220 180 220   0
+14128  wm_rh_G_postcentral                     20 180 140   0
+14129  wm_rh_G_precentral                      60 140 180   0
+14130  wm_rh_G_precuneus                       25  20 140   0
+14131  wm_rh_G_rectus                          20  60 100   0
+14132  wm_rh_G_subcallosal                     60 220  20   0
+14133  wm_rh_G_temp_sup-G_T_transv             60  60 220   0
+14134  wm_rh_G_temp_sup-Lateral               220  60 220   0
+14135  wm_rh_G_temp_sup-Plan_polar             65 220  60   0
+14136  wm_rh_G_temp_sup-Plan_tempo             25 140  20   0
+14137  wm_rh_G_temporal_inf                   220 220 100   0
+14138  wm_rh_G_temporal_middle                180  60  60   0
+14139  wm_rh_Lat_Fis-ant-Horizont              61  20 220   0
+14140  wm_rh_Lat_Fis-ant-Vertical              61  20  60   0
+14141  wm_rh_Lat_Fis-post                      61  60 100   0
+14142  wm_rh_Medial_wall                       25  25  25   0
+14143  wm_rh_Pole_occipital                   140  20  60   0
+14144  wm_rh_Pole_temporal                    220 180  20   0
+14145  wm_rh_S_calcarine                       63 180 180   0
+14146  wm_rh_S_central                        221  20  10   0
+14147  wm_rh_S_cingul-Marginalis              221  20 100   0
+14148  wm_rh_S_circular_insula_ant            221  60 140   0
+14149  wm_rh_S_circular_insula_inf            221  20 220   0
+14150  wm_rh_S_circular_insula_sup             61 220 220   0
+14151  wm_rh_S_collat_transv_ant              100 200 200   0
+14152  wm_rh_S_collat_transv_post              10 200 200   0
+14153  wm_rh_S_front_inf                      221 220  20   0
+14154  wm_rh_S_front_middle                   141  20 100   0
+14155  wm_rh_S_front_sup                       61 220 100   0
+14156  wm_rh_S_interm_prim-Jensen             141  60  20   0
+14157  wm_rh_S_intrapariet_and_P_trans        143  20 220   0
+14158  wm_rh_S_oc_middle_and_Lunatus          101  60 220   0
+14159  wm_rh_S_oc_sup_and_transversal          21  20 140   0
+14160  wm_rh_S_occipital_ant                   61  20 180   0
+14161  wm_rh_S_oc-temp_lat                    221 140  20   0
+14162  wm_rh_S_oc-temp_med_and_Lingual        141 100 220   0
+14163  wm_rh_S_orbital_lateral                221 100  20   0
+14164  wm_rh_S_orbital_med-olfact             181 200  20   0
+14165  wm_rh_S_orbital-H_Shaped               101  20  20   0
+14166  wm_rh_S_parieto_occipital              101 100 180   0
+14167  wm_rh_S_pericallosal                   181 220  20   0
+14168  wm_rh_S_postcentral                     21 140 200   0
+14169  wm_rh_S_precentral-inf-part             21  20 240   0
+14170  wm_rh_S_precentral-sup-part             21  20 200   0
+14171  wm_rh_S_suborbital                      21  20  60   0
+14172  wm_rh_S_subparietal                    101  60  60   0
+14173  wm_rh_S_temporal_inf                    21 180 180   0
+14174  wm_rh_S_temporal_sup                   223 220  60   0
+14175  wm_rh_S_temporal_transverse            221  60  60   0
+
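The file above is a FreeSurfer-style color lookup table: comment lines start with '#', and each data row is "<index> <label> <R> <G> <B> <A>". As an illustration of the layout (not MNE's or FreeSurfer's own reader), a minimal Python parsing sketch; the file name is a placeholder and malformed rows are skipped rather than rejected:

def read_color_lut(fname):
    """Return {index: (label, (r, g, b, a))} from a FreeSurfer-style LUT."""
    lut = {}
    with open(fname) as fid:
        for line in fid:
            line = line.split('#')[0].strip()  # drop comments and whitespace
            if not line:
                continue
            parts = line.split()
            if len(parts) != 6:
                continue  # skip malformed rows rather than failing hard
            index, label = int(parts[0]), parts[1]
            lut[index] = (label, tuple(int(p) for p in parts[2:]))
    return lut

# Example against the table above (hypothetical path):
# lut = read_color_lut('FreeSurferColorLUT.txt')
# lut[9000] -> ('ctx-lh-prefrontal', (30, 5, 30, 0))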
diff --git a/mne/data/coil_def.dat b/mne/data/coil_def.dat
index 61eea53..dc4eee6 100644
--- a/mne/data/coil_def.dat
+++ b/mne/data/coil_def.dat
@@ -33,7 +33,7 @@
 #
 #       Produced with:
 #
-#	mne_list_coil_def version 1.12 compiled at Feb  4 2013 04:18:14
+#	mne_list_coil_def version 1.12 compiled at Nov 19 2014 04:19:15
 #
 3   2       0   2  2.789e-02  1.620e-02	"Neuromag-122 planar gradiometer size = 27.89  mm base = 16.20  mm"
  61.7284  8.100e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
@@ -381,6 +381,51 @@
  -0.2500 -1.500e-03  1.500e-03  5.000e-02  0.000  0.000  1.000
  -0.2500 -1.500e-03 -1.500e-03  5.000e-02  0.000  0.000  1.000
  -0.2500  1.500e-03 -1.500e-03  5.000e-02  0.000  0.000  1.000
+1   7002    0   1  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7002    1   4  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  0.2500  2.500e-03  2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.500e-03  2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -2.500e-03 -2.500e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  2.500e-03 -2.500e-03  0.000e+00  0.000  0.000  1.000
+1   7002    2   7  1.000e-02  0.000e+00	"BabyMEG system magnetometer size = 10.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  2.041e-03  3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  2.041e-03 -3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -2.041e-03  3.536e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -2.041e-03 -3.536e-03  0.000e+00  0.000  0.000  1.000
+1   7003    0   1  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7003    1   4  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  0.2500  5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+1   7003    2   7  2.000e-02  0.000e+00	"BabyMEG system compensation magnetometer size = 20.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+1   7004    0   1  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  1.0000  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+1   7004    1   4  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  0.2500  5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03  5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500 -5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+  0.2500  5.000e-03 -5.000e-03  0.000e+00  0.000  0.000  1.000
+1   7004    2   7  2.000e-02  0.000e+00	"BabyMEG system reference magnetometer size = 20.00  mm"
+  0.2500  0.000e+00  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250 -8.165e-03  0.000e+00  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250  4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03  7.071e-03  0.000e+00  0.000  0.000  1.000
+  0.1250 -4.082e-03 -7.071e-03  0.000e+00  0.000  0.000  1.000
 3   8001    0   2  7.000e-02  7.500e-02	"Sample TMS figure-of-eight coil size = 70.00  mm base = 75.00  mm"
  13.3333  3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
 -13.3333 -3.750e-02  0.000e+00  0.000e+00  0.000  0.000  1.000
diff --git a/mne/data/coil_def_Elekta.dat b/mne/data/coil_def_Elekta.dat
new file mode 100644
index 0000000..a15e3db
--- /dev/null
+++ b/mne/data/coil_def_Elekta.dat
@@ -0,0 +1,70 @@
+#
+#	MEG coil definition file for Maxwell Filtering
+#	
+#	These coil definitions make use of integration points according to the last
+#	formula in section 25.4.62 in the "Handbook of Mathematical Functions:
+#	With Formulas, Graphs, and Mathematical Tables" edited by Abramowitz and Stegun.
+#
+#	These coil definitions were used by Samu Taulu in the Signal Space
+#	Separation (SSS) work, which Elekta subsequently adopted in MaxFilter. The only
+#	difference is that the local z-coordinate was set to zero in Taulu's original
+#	formulation.
+#
+#	Issues left to be sorted out:
+#	1) Discrepancy between gradiometer base sizes: 16.69 mm in Elekta, 16.80 mm in MNE.
+#	2) Source of the small z-coordinate offset (0.0003 m). Not used in the original
+#	   SSS work, but present in both Elekta's and MNE's coil definitions.
+#
+#	<class>	<id> <accuracy> <np> <size> <baseline> "<description>"
+#
+# struct class id accuracy num_points size baseline description
+# format '%d %d %d %d %e %e %s'
+#
+#	<w_1> <x_1/m> <y_1/m> <z_1/m> <nx_1> <ny_1> <nz_1>
+#
+# struct w     x       y       z       nx     ny     nz
+# format '%f %e %e %e %e %e %e'
+#
+#	....
+#
+#	<w_np> <x_np/m> <y_np/m> <z_np/m> <nx_np> <ny_np> <nz_np>
+#
+#	<class>		1	magnetometer
+#			2	axial gradiometer
+#			3	planar gradiometer
+#			4	axial second-order gradiometer
+#
+#	<accuracy>	0       point approximation
+#                       1	normal
+#			2	accurate
+#
+#
+3   3012    2   8  2.639e-02  1.669e-02	"Vectorview planar gradiometer T1 size = 26.39  mm base = 16.69  mm"
+1.4979029359e+01  1.0800000000e-02  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  5.8900000000e-03  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  5.8900000000e-03  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.4979029359e+01  1.0800000000e-02  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -1.0800000000e-02  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -5.8900000000e-03  6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -5.8900000000e-03  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+-1.4979029359e+01  -1.0800000000e-02  -6.7100000000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1   3022    2  9  2.580e-02  0.000e+00	"Vectorview magnetometer T1 size = 25.80  mm"
+7.7160493800e-02  -9.9922970000e-03  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  9.9922970000e-03  9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  -9.9922970000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.9753086420e-01  0.0000000000e+00  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  9.9922970000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  -9.9922970000e-03  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  9.9922970000e-03  -9.9922970000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1   3024    2  9  2.100e-02  0.000e+00	"Vectorview magnetometer T3 size = 21.00  mm"
+7.7160493800e-02  -8.1332650000e-03  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  8.1332650000e-03  8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  -8.1332650000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.9753086420e-01  0.0000000000e+00  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  8.1332650000e-03  0.0000000000e+00  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  -8.1332650000e-03  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+1.2345679010e-01  0.0000000000e+00  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
+7.7160493800e-02  8.1332650000e-03  -8.1332650000e-03  3.0000000000e-04  0.0000000000e+00  0.0000000000e+00  1.0000000000e+00
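The header of coil_def_Elekta.dat documents the record layout: a definition line '<class> <id> <accuracy> <np> <size> <baseline> "<description>"' followed by <np> integration-point rows of '<w> <x> <y> <z> <nx> <ny> <nz>'. A hedged sketch of a reader for that layout (illustrative only, not MNE's own parser; it assumes every definition line carries a quoted description, as documented):

def read_coil_defs(fname):
    """Return a list of coil records parsed from a coil definition file."""
    with open(fname) as fid:
        lines = [ln for ln in fid if ln.strip() and not ln.startswith('#')]
    coils = []
    i = 0
    while i < len(lines):
        head, desc = lines[i].split('"')[0], lines[i].split('"')[1]
        cls, cid, acc, n_pts, size, base = head.split()
        points = []
        for j in range(int(n_pts)):  # one integration point per row
            w, x, y, z, nx, ny, nz = map(float, lines[i + 1 + j].split())
            points.append(dict(w=w, r=(x, y, z), n=(nx, ny, nz)))
        coils.append(dict(coil_class=int(cls), coil_id=int(cid),
                          accuracy=int(acc), size=float(size),
                          base=float(base), desc=desc, points=points))
        i += 1 + int(n_pts)
    return coils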
diff --git a/mne/data/image/custom_layout.lout b/mne/data/image/custom_layout.lout
new file mode 100644
index 0000000..a2f8ed8
--- /dev/null
+++ b/mne/data/image/custom_layout.lout
@@ -0,0 +1,24 @@
+    0.00     0.00     0.08     0.04
+000     0.13     0.34     0.07     0.05 0
+001     0.16     0.49     0.07     0.05 1
+002     0.18     0.63     0.07     0.05 2
+003     0.25     0.52     0.07     0.05 3
+004     0.32     0.63     0.07     0.05 4
+005     0.34     0.49     0.07     0.05 5
+006     0.36     0.35     0.07     0.05 6
+007     0.44     0.35     0.07     0.05 7
+008     0.45     0.49     0.07     0.05 8
+009     0.45     0.63     0.07     0.05 9
+010     0.48     0.53     0.07     0.05 10
+011     0.52     0.44     0.07     0.05 11
+012     0.56     0.35     0.07     0.05 12
+013     0.56     0.49     0.07     0.05 13
+014     0.56     0.63     0.07     0.05 14
+015     0.69     0.62     0.07     0.05 15
+016     0.69     0.55     0.07     0.05 16
+017     0.69     0.49     0.07     0.05 17
+018     0.69     0.42     0.07     0.05 18
+019     0.69     0.34     0.07     0.05 19
+020     0.77     0.35     0.07     0.05 20
+021     0.77     0.49     0.07     0.05 21
+022     0.77     0.63     0.07     0.05 22
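custom_layout.lout above follows MNE's plain-text layout format: the first line gives the plot bounding box, and each later line places one channel as "<id> <x> <y> <width> <height> <name>". The supported API in mne-python is mne.channels.read_layout; the sketch below only illustrates the file structure, with field meanings assumed from the rows above:

def read_lout(fname):
    """Parse a .lout file into (box, positions, names) -- a rough sketch."""
    with open(fname) as fid:
        lines = [ln for ln in fid.read().splitlines() if ln.strip()]
    box = tuple(float(v) for v in lines[0].split())  # bounding box (4 floats)
    pos, names = [], []
    for line in lines[1:]:
        parts = line.split()
        pos.append(tuple(float(v) for v in parts[1:5]))  # x, y, width, height
        names.append(parts[5])                           # channel name/label
    return box, pos, names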
diff --git a/mne/data/image/mni_brain.gif b/mne/data/image/mni_brain.gif
new file mode 100644
index 0000000..3d6cc08
Binary files /dev/null and b/mne/data/image/mni_brain.gif differ
diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py
index 38ea9f3..e0530bd 100644
--- a/mne/datasets/__init__.py
+++ b/mne/datasets/__init__.py
@@ -4,5 +4,8 @@
 from . import sample
 from . import megsim
 from . import spm_face
+from . import brainstorm
 from . import eegbci
 from . import somato
+from . import testing
+from . import _fake
diff --git a/mne/datasets/_fake/__init__.py b/mne/datasets/_fake/__init__.py
new file mode 100644
index 0000000..b807fc4
--- /dev/null
+++ b/mne/datasets/_fake/__init__.py
@@ -0,0 +1,4 @@
+"""MNE sample dataset
+"""
+
+from ._fake import data_path, get_version
diff --git a/mne/datasets/_fake/_fake.py b/mne/datasets/_fake/_fake.py
new file mode 100644
index 0000000..580253b
--- /dev/null
+++ b/mne/datasets/_fake/_fake.py
@@ -0,0 +1,25 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+from ...utils import verbose
+from ..utils import (_data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=False,
+              download=True, verbose=None):
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='fake',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='fake',
+                                          conf='MNE_DATASETS_FAKE_PATH')
+
+
+def get_version():
+    return _get_version('fake')
+
+get_version.__doc__ = _version_doc.format(name='fake')
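The _fake module exists so the download machinery itself can be exercised in
tests. It also shows the docstring-templating pattern every dataset module
in this commit uses; a minimal standalone sketch of that pattern (the names
below are generic, not taken from the module):

    _doc_template = """Get path to local copy of {name} dataset.

        The location is controlled by the {conf} configuration key.
    """


    def data_path():
        return '/path/to/data'  # stand-in for the real lookup

    data_path.__doc__ = _doc_template.format(name='fake',
                                             conf='MNE_DATASETS_FAKE_PATH')
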
diff --git a/mne/datasets/brainstorm/__init__.py b/mne/datasets/brainstorm/__init__.py
new file mode 100644
index 0000000..eb985dc
--- /dev/null
+++ b/mne/datasets/brainstorm/__init__.py
@@ -0,0 +1,4 @@
+"""Brainstorm Dataset
+"""
+
+from . import bst_raw, bst_resting, bst_auditory
diff --git a/mne/datasets/brainstorm/bst_auditory.py b/mne/datasets/brainstorm/bst_auditory.py
new file mode 100644
index 0000000..2cbe827
--- /dev/null
+++ b/mne/datasets/brainstorm/bst_auditory.py
@@ -0,0 +1,60 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
+    - One subject, two acquisition runs of 6 minutes each
+    - Subject stimulated binaurally with intra-aural earphones
+      (air tubes+transducers)
+    - Each run contains:
+        - 200 regular beeps (440Hz)
+        - 40 easy deviant beeps (554.4Hz, 4 semitones higher)
+    - Random inter-stimulus interval: between 0.7s and 1.7s, uniformly
+      distributed
+    - The subject presses a button when detecting a deviant with the right
+      index finger
+    - Auditory stimuli generated with the Matlab Psychophysics toolbox
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_auditory.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_auditory')
+    else:
+        return data_path
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_DATA'
+                                            '_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_auditory) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_auditory) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
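With this module in place the Brainstorm auditory data behaves like any
other remote dataset; a usage sketch (actually downloading prompts for
agreement with the license text added to mne/datasets/utils.py below):

    from mne.datasets.brainstorm import bst_auditory

    path = bst_auditory.data_path(download=False)  # '' if not present
    if path != '':
        print('bst_auditory lives at %s' % path)
    bst_auditory.description()  # print the summary shown above
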
diff --git a/mne/datasets/brainstorm/bst_raw.py b/mne/datasets/brainstorm/bst_raw.py
new file mode 100644
index 0000000..1033008
--- /dev/null
+++ b/mne/datasets/brainstorm/bst_raw.py
@@ -0,0 +1,59 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf
+    - One subject, one acquisition run of 6 minutes
+    - Subject stimulated using Digitimer Constant Current Stimulator
+      (model DS7A)
+    - The run contains 200 electric stimulations randomly distributed between
+      left and right:
+        - 102 stimulations of the left hand
+        - 98 stimulations of the right hand
+    - Inter-stimulus interval: jittered between [1500, 2000]ms
+    - Stimuli generated using PsychToolBox on Windows PC (TTL pulse generated
+      with the parallel port connected to the Digitimer via the rear panel BNC)
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_raw.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_raw')
+    else:
+        return data_path
+
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_DATA'
+                                            '_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_raw) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_raw) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
diff --git a/mne/datasets/brainstorm/bst_resting.py b/mne/datasets/brainstorm/bst_resting.py
new file mode 100644
index 0000000..3d33652
--- /dev/null
+++ b/mne/datasets/brainstorm/bst_resting.py
@@ -0,0 +1,51 @@
+# Authors: Mainak Jas <mainak.jas at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+from ...utils import verbose
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
+                     _data_path_doc)
+
+has_brainstorm_data = partial(has_dataset, name='brainstorm')
+
+_description = u"""
+URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
+    - One subject
+    - Two runs of 10 min of resting state recordings
+    - Eyes open
+"""
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
+    archive_name = dict(brainstorm='bst_resting.tar.bz2')
+    data_path = _data_path(path=path, force_update=force_update,
+                           update_path=update_path, name='brainstorm',
+                           download=download, archive_name=archive_name)
+    if data_path != '':
+        return op.join(data_path, 'bst_resting')
+    else:
+        return data_path
+
+_data_path_doc = _data_path_doc.format(name='brainstorm',
+                                       conf='MNE_DATASETS_BRAINSTORM_DATA'
+                                            '_PATH')
+_data_path_doc = _data_path_doc.replace('brainstorm dataset',
+                                        'brainstorm (bst_resting) dataset')
+data_path.__doc__ = _data_path_doc
+
+
+def get_version():
+    return _get_version('brainstorm')
+
+get_version.__doc__ = _version_doc.format(name='brainstorm')
+
+
+def description():
+    """Get description of brainstorm (bst_resting) dataset
+    """
+    for desc in _description.splitlines():
+        print(desc)
diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py
index 56ee632..274b66e 100644
--- a/mne/datasets/eegbci/eegbci.py
+++ b/mne/datasets/eegbci/eegbci.py
@@ -3,17 +3,17 @@
 
 import os
 from os import path as op
-from ...externals.six import string_types
-from ...utils import _fetch_file, get_config, set_config, _url_to_local_path
 
-if 'raw_input' not in __builtins__:
-    raw_input = input
+from ..utils import _get_path, _do_path_update
+from ...utils import _fetch_file, _url_to_local_path, verbose
 
 
 EEGMI_URL = 'http://www.physionet.org/physiobank/database/eegmmidb/'
 
 
-def data_path(url, path=None, force_update=False, update_path=None):
+@verbose
+def data_path(url, path=None, force_update=False, update_path=None,
+              verbose=None):
     """Get path to local copy of EEGMMI dataset URL
 
     This is a low-level function useful for getting a local copy of a
@@ -36,6 +36,8 @@ def data_path(url, path=None, force_update=False, update_path=None):
     update_path : bool | None
         If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
         config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -64,42 +66,11 @@ def data_path(url, path=None, force_update=False, update_path=None):
         Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
         PhysioToolkit, and PhysioNet: Components of a New Research Resource for
         Complex Physiologic Signals. Circulation 101(23):e215-e220
-    """
-
-    if path is None:
-        # use an intelligent guess if it's not defined
-        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
-                                       '..', 'examples'))
-
-        key = 'MNE_DATASETS_EEGBCI_PATH'
-        # backward compatibility
-        if get_config(key) is None:
-            key = 'MNE_DATA'
-
-        path = get_config(key, def_path)
-
-        # use the same for all datasets
-        if not op.exists(path) or not os.access(path, os.W_OK):
-            try:
-                os.mkdir(path)
-            except OSError:
-                try:
-                    logger.info("Checking for EEGBCI data in '~/mne_data'...")
-                    path = op.join(op.expanduser("~"), "mne_data")
-                    if not op.exists(path):
-                        logger.info("Trying to create "
-                                    "'~/mne_data' in home directory")
-                        os.mkdir(path)
-                except OSError:
-                    raise OSError("User does not have write permissions "
-                                  "at '%s', try giving the path as an argument "
-                                  "to data_path() where user has write "
-                                  "permissions, for ex:data_path"
-                                  "('/home/xyz/me2/')" % (path))
-
-    if not isinstance(path, string_types):
-        raise ValueError('path must be a string or None')
+    """  # noqa
 
+    key = 'MNE_DATASETS_EEGBCI_PATH'
+    name = 'EEGBCI'
+    path = _get_path(path, key, name)
     destination = _url_to_local_path(url, op.join(path, 'MNE-eegbci-data'))
     destinations = [destination]
 
@@ -112,26 +83,13 @@ def data_path(url, path=None, force_update=False, update_path=None):
         _fetch_file(url, destination, print_destination=False)
 
     # Offer to update the path
-    path = op.abspath(path)
-    if update_path is None:
-        if get_config(key, '') != path:
-            update_path = True
-            msg = ('Do you want to set the path:\n    %s\nas the default '
-                   'EEGBCI dataset path in the mne-python config ([y]/n)? '
-                   % path)
-            answer = raw_input(msg)
-            if answer.lower() == 'n':
-                update_path = False
-        else:
-            update_path = False
-    if update_path is True:
-        set_config(key, path)
-
+    _do_path_update(path, update_path, key, name)
     return destinations
 
 
+@verbose
 def load_data(subject, runs, path=None, force_update=False, update_path=None,
-              base_url=EEGMI_URL):
+              base_url=EEGMI_URL, verbose=None):
     """Get paths to local copy of EEGBCI dataset files
 
     Parameters
@@ -162,6 +120,8 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None,
     update_path : bool | None
         If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
         config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
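After this refactoring, load_data remains the user-facing entry point and
data_path resolves one URL at a time. A usage sketch (the subject and run
numbers are illustrative only):

    from mne.datasets import eegbci

    # Files are fetched on first use and cached under the directory
    # recorded in MNE_DATASETS_EEGBCI_PATH
    fnames = eegbci.load_data(1, runs=[4, 8, 12], update_path=False)
    print(fnames)  # local paths to the downloaded .edf files
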
diff --git a/mne/datasets/megsim/megsim.py b/mne/datasets/megsim/megsim.py
index 3fa5fd3..44e77fb 100644
--- a/mne/datasets/megsim/megsim.py
+++ b/mne/datasets/megsim/megsim.py
@@ -1,18 +1,20 @@
 # Author: Eric Larson <larson.eric.d at gmail.com>
 # License: BSD Style.
 
-from ...externals.six import string_types
 import os
 from os import path as op
 import zipfile
 from sys import stdout
 
-from ...utils import _fetch_file, get_config, set_config, _url_to_local_path
+from ...utils import _fetch_file, _url_to_local_path, verbose
+from ..utils import _get_path, _do_path_update
 from .urls import (url_match, valid_data_types, valid_data_formats,
                    valid_conditions)
 
 
-def data_path(url, path=None, force_update=False, update_path=None):
+@verbose
+def data_path(url, path=None, force_update=False, update_path=None,
+              verbose=None):
     """Get path to local copy of MEGSIM dataset URL
 
     This is a low-level function useful for getting a local copy of a
@@ -35,6 +37,8 @@ def data_path(url, path=None, force_update=False, update_path=None):
     update_path : bool | None
         If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
         config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -60,39 +64,10 @@ def data_path(url, path=None, force_update=False, update_path=None):
         Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
         (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
         Realistic Simulated and Empirical Data. Neuroinform 10:141-158
-    """
-
-    if path is None:
-        # use an intelligent guess if it's not defined
-        def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
-                                      '..', 'examples'))
-        key = 'MNE_DATASETS_MEGSIM_PATH'
-        if get_config(key) is None:
-            key = 'MNE_DATA'
-        path = get_config(key, def_path)
-
-        # use the same for all datasets
-        if not op.exists(path) or not os.access(path, os.W_OK):
-            try:
-                os.mkdir(path)
-            except OSError:
-                try:
-                    logger.info("Checking for megsim data in '~/mne_data'...")
-                    path = op.join(op.expanduser("~"), "mne_data")
-                    if not op.exists(path):
-                        logger.info("Trying to create "
-                                    "'~/mne_data' in home directory")
-                        os.mkdir(path)
-                except OSError:
-                    raise OSError("User does not have write permissions "
-                                  "at '%s', try giving the path as an argument "
-                                  "to data_path() where user has write "
-                                  "permissions, for ex:data_path"
-                                  "('/home/xyz/me2/')" % (path))
-
-    if not isinstance(path, string_types):
-        raise ValueError('path must be a string or None')
-
+    """  # noqa
+    key = 'MNE_DATASETS_MEGSIM_PATH'
+    name = 'MEGSIM'
+    path = _get_path(path, key, name)
     destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
     destinations = [destination]
 
@@ -121,27 +96,13 @@ def data_path(url, path=None, force_update=False, update_path=None):
         z.close()
         destinations = [op.join(decomp_dir, f) for f in files]
 
-    # Offer to update the path
-    path = op.abspath(path)
-    if update_path is None:
-        if get_config(key, '') != path:
-            update_path = True
-            msg = ('Do you want to set the path:\n    %s\nas the default '
-                   'MEGSIM dataset path in the mne-python config ([y]/n)? '
-                   % path)
-            answer = raw_input(msg)
-            if answer.lower() == 'n':
-                update_path = False
-        else:
-            update_path = False
-    if update_path is True:
-        set_config(key, path)
-
+    path = _do_path_update(path, update_path, key, name)
     return destinations
 
 
+@verbose
 def load_data(condition='visual', data_format='raw', data_type='experimental',
-              path=None, force_update=False, update_path=None):
+              path=None, force_update=False, update_path=None, verbose=None):
     """Get path to local copy of MEGSIM dataset type
 
     Parameters
@@ -165,6 +126,8 @@ def load_data(condition='visual', data_format='raw', data_type='experimental',
     update_path : bool | None
         If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
         config to the given path. If None, the user is prompted.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -187,13 +150,13 @@ def load_data(condition='visual', data_format='raw', data_type='experimental',
         Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
         (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
         Realistic Simulated and Empirical Data. Neuroinform 10:141-158
-    """
+    """  # noqa
 
     if not condition.lower() in valid_conditions:
         raise ValueError('Unknown condition "%s"' % condition)
-    if not data_format in valid_data_formats:
+    if data_format not in valid_data_formats:
         raise ValueError('Unknown data_format "%s"' % data_format)
-    if not data_type in valid_data_types:
+    if data_type not in valid_data_types:
         raise ValueError('Unknown data_type "%s"' % data_type)
     urls = url_match(condition, data_format, data_type)
 
diff --git a/mne/datasets/megsim/urls.py b/mne/datasets/megsim/urls.py
index c3a23e4..409e60f 100644
--- a/mne/datasets/megsim/urls.py
+++ b/mne/datasets/megsim/urls.py
@@ -3,10 +3,6 @@
 
 import numpy as np
 
-valid_data_types = ['experimental', 'simulation']
-valid_data_formats = ['single-trial', 'evoked', 'raw']
-valid_conditions = ['visual', 'auditory', 'somatosensory']
-
 url_root = 'http://cobre.mrn.org/megsim'
 
 urls = ['/empdata/neuromag/visual/subject1_day1_vis_raw.fif',
@@ -31,8 +27,11 @@ urls = ['/empdata/neuromag/visual/subject1_day1_vis_raw.fif',
         '/simdata/neuromag/visual/M87174545_vis_sim5_4mm_30na_neuro_rn.fif',
 
         '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_fif.zip',
-        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_withOsc_fif.zip',
-        '/simdata_singleTrials/4545_sim_oscOnly_v1_IPS_ILOG_30hzAdded.fif']
+        '/simdata_singleTrials/subject1_singleTrials_VisWorkingMem_withOsc_fif.zip',  # noqa
+        '/simdata_singleTrials/4545_sim_oscOnly_v1_IPS_ILOG_30hzAdded.fif',
+
+        '/index.html',
+]
 
 data_formats = ['raw',
                 'raw',
@@ -58,7 +57,9 @@ data_formats = ['raw',
                 'single-trial',
                 'single-trial',
                 'single-trial',
-                ]
+
+                'text']
+
 subjects = ['subject_1',
             'subject_1',
             'subject_3',
@@ -82,7 +83,9 @@ subjects = ['subject_1',
 
             'subject_1',
             'subject_1',
-            'subject_1']
+            'subject_1',
+
+            '']
 
 data_types = ['experimental',
               'experimental',
@@ -107,7 +110,9 @@ data_types = ['experimental',
 
               'simulation',
               'simulation',
-              'simulation']
+              'simulation',
+
+              'text']
 
 conditions = ['visual',
               'visual',
@@ -132,7 +137,13 @@ conditions = ['visual',
 
               'visual',
               'visual',
-              'visual']
+              'visual',
+
+              'index']
+
+valid_data_types = list(set(data_types))
+valid_data_formats = list(set(data_formats))
+valid_conditions = list(set(conditions))
 
 # turn them into arrays for ease of use
 urls = np.atleast_1d(urls)
@@ -142,8 +153,9 @@ data_types = np.atleast_1d(data_types)
 conditions = np.atleast_1d(conditions)
 
 # Useful for testing
-#assert len(conditions) == len(data_types) == len(subjects) \
-#    == len(data_formats) == len(urls)
+# assert len(conditions) == len(data_types) == len(subjects) \
+#     == len(data_formats) == len(urls)
+
 
 def url_match(condition, data_format, data_type):
     """Function to match MEGSIM data files"""
diff --git a/mne/datasets/sample/__init__.py b/mne/datasets/sample/__init__.py
index 1f06c2d..6b1faf2 100644
--- a/mne/datasets/sample/__init__.py
+++ b/mne/datasets/sample/__init__.py
@@ -1,4 +1,5 @@
 """MNE sample dataset
 """
 
-from .sample import data_path, has_sample_data, requires_sample_data
+from .sample import (data_path, has_sample_data, get_version,
+                     requires_sample_data)
diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py
index d5a6532..46f40d9 100644
--- a/mne/datasets/sample/sample.py
+++ b/mne/datasets/sample/sample.py
@@ -5,30 +5,37 @@
 
 import numpy as np
 
-from ...utils import get_config, verbose
+from ...utils import verbose, get_config
 from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
 
 
 has_sample_data = partial(has_dataset, name='sample')
 
 
 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, verbose=None):
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
     return _data_path(path=path, force_update=force_update,
                       update_path=update_path, name='sample',
-                      download=download,
-                      verbose=verbose)
+                      download=download)
 
-data_path.__doc__ = _doc.format(name='sample',
-                                conf='MNE_DATASETS_SAMPLE_PATH')
+data_path.__doc__ = _data_path_doc.format(name='sample',
+                                          conf='MNE_DATASETS_SAMPLE_PATH')
 
-# Allow forcing of sample dataset skip (for tests) using:
-# `make test-no-sample`
+
+def get_version():
+    return _get_version('sample')
+
+get_version.__doc__ = _version_doc.format(name='sample')
+
+
+# Allow forcing of sample dataset skip
 def _skip_sample_data():
-    skip_sample = get_config('MNE_SKIP_SAMPLE_DATASET_TESTS', 'false') == 'true'
-    skip = skip_sample or not has_sample_data()
+    skip_testing = (get_config('MNE_SKIP_SAMPLE_DATASET_TESTS', 'false') ==
+                    'true')
+    skip = skip_testing or not has_sample_data()
     return skip
 
 requires_sample_data = np.testing.dec.skipif(_skip_sample_data,
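Every dataset now pairs data_path with a get_version helper; a sketch:

    from mne.datasets import sample

    path = sample.data_path(download=False)  # '' until downloaded
    print(sample.get_version())  # version string, or None if absent
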
diff --git a/mne/datasets/somato/__init__.py b/mne/datasets/somato/__init__.py
index bdc4725..aa3f82d 100644
--- a/mne/datasets/somato/__init__.py
+++ b/mne/datasets/somato/__init__.py
@@ -1,4 +1,4 @@
 """Somatosensory dataset
 """
 
-from .somato import data_path, has_somato_data, requires_somato_data
+from .somato import data_path, has_somato_data, get_version
diff --git a/mne/datasets/somato/somato.py b/mne/datasets/somato/somato.py
index 89b1781..d0daf98 100644
--- a/mne/datasets/somato/somato.py
+++ b/mne/datasets/somato/somato.py
@@ -1,35 +1,29 @@
-# Authors: Alexandre Gramfort <gramfort at nmr.mgh.harvard.edu>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD Style.
 
-import numpy as np
-
-from ...utils import get_config, verbose
+from ...utils import verbose
 from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
 
 
 has_somato_data = partial(has_dataset, name='somato')
 
 
 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, verbose=None):
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
     return _data_path(path=path, force_update=force_update,
                       update_path=update_path, name='somato',
-                      download=download,
-                      verbose=verbose)
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='somato',
+                                          conf='MNE_DATASETS_SOMATO_PATH')
 
-data_path.__doc__ = _doc.format(name='somato',
-                                conf='MNE_DATASETS_SOMATO_PATH')
 
-# Allow forcing of somato dataset skip (for tests) using:
-# `make test-no-somato`
-def _skip_somato_data():
-    skip_somato = get_config('MNE_SKIP_SOMATO_DATASET_TESTS', 'false') == 'true'
-    skip = skip_somato or not has_somato_data()
-    return skip
+def get_version():
+    return _get_version('somato')
 
-requires_somato_data = np.testing.dec.skipif(_skip_somato_data,
-                                             'Requires somato dataset')
+get_version.__doc__ = _version_doc.format(name='somato')
diff --git a/mne/datasets/spm_face/__init__.py b/mne/datasets/spm_face/__init__.py
index f151c52..90f01c7 100644
--- a/mne/datasets/spm_face/__init__.py
+++ b/mne/datasets/spm_face/__init__.py
@@ -1,4 +1,4 @@
 """SPM face dataset
 """
 
-from .spm_data import data_path, has_spm_data, requires_spm_data
+from .spm_data import data_path, has_spm_data, get_version
diff --git a/mne/datasets/spm_face/spm_data.py b/mne/datasets/spm_face/spm_data.py
index 7471e70..19c6461 100644
--- a/mne/datasets/spm_face/spm_data.py
+++ b/mne/datasets/spm_face/spm_data.py
@@ -2,33 +2,27 @@
 #
 # License: BSD Style.
 
-import numpy as np
-
-from ...utils import get_config, verbose
+from ...utils import verbose
 from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
 
 
 has_spm_data = partial(has_dataset, name='spm')
 
 
 @verbose
-def data_path(path=None, force_update=False, update_path=True,
-              download=True, verbose=None):
+def data_path(path=None, force_update=False, update_path=True, download=True,
+              verbose=None):
     return _data_path(path=path, force_update=force_update,
                       update_path=update_path, name='spm',
-                      download=download,
-                      verbose=verbose)
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='spm',
+                                          conf='MNE_DATASETS_SPM_DATA_PATH')
 
-data_path.__doc__ = _doc.format(name='spm',
-                                conf='MNE_DATASETS_SPM_DATA_PATH')
 
-# Allow forcing of sample dataset skip (for tests) using:
-# `make test-no-sample`
-def _skip_spm_sample_data():
-    skip_spm = get_config('MNE_SKIP_SPM_DATASET_TESTS', 'false') == 'true'
-    skip = skip_spm or not has_spm_data()
-    return skip
+def get_version():
+    return _get_version('spm')
 
-requires_spm_data = np.testing.dec.skipif(_skip_spm_sample_data,
-                                          'Requires spm dataset')
+get_version.__doc__ = _version_doc.format(name='spm')
diff --git a/mne/datasets/testing/__init__.py b/mne/datasets/testing/__init__.py
new file mode 100644
index 0000000..7fa74ee
--- /dev/null
+++ b/mne/datasets/testing/__init__.py
@@ -0,0 +1,4 @@
+"""MNE sample dataset
+"""
+
+from ._testing import data_path, requires_testing_data, get_version
diff --git a/mne/datasets/testing/_testing.py b/mne/datasets/testing/_testing.py
new file mode 100644
index 0000000..932bd2e
--- /dev/null
+++ b/mne/datasets/testing/_testing.py
@@ -0,0 +1,47 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: BSD Style.
+
+import numpy as np
+
+from ...utils import verbose, get_config
+from ...fixes import partial
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+                     _get_version, _version_doc)
+
+
+has_testing_data = partial(has_dataset, name='testing')
+
+
+@verbose
+def data_path(path=None, force_update=False, update_path=True,
+              download=True, verbose=None):
+    # Make sure we don't do something stupid
+    if download and \
+            get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') == 'true':
+        raise RuntimeError('Cannot download data if skipping is forced')
+    return _data_path(path=path, force_update=force_update,
+                      update_path=update_path, name='testing',
+                      download=download)
+
+data_path.__doc__ = _data_path_doc.format(name='testing',
+                                          conf='MNE_DATASETS_TESTING_PATH')
+
+
+def get_version():
+    return _get_version('testing')
+
+get_version.__doc__ = _version_doc.format(name='testing')
+
+
+# Allow forcing of testing dataset skip (for Debian tests) using:
+# `make test-no-testing-data`
+def _skip_testing_data():
+    skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') ==
+                    'true')
+    skip = skip_testing or not has_testing_data()
+    return skip
+
+requires_testing_data = np.testing.dec.skipif(_skip_testing_data,
+                                              'Requires testing dataset')
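requires_testing_data is meant to decorate unit tests, mirroring the
sample-data skip machinery; a sketch (the test body is illustrative):

    from mne.datasets.testing import data_path, requires_testing_data


    @requires_testing_data
    def test_uses_testing_data():
        # Runs only when the testing dataset is present and not skipped
        path = data_path(download=False)
        assert path != ''
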
diff --git a/doc/sphinxext/numpy_ext_old/__init__.py b/mne/datasets/tests/__init__.py
similarity index 100%
rename from doc/sphinxext/numpy_ext_old/__init__.py
rename to mne/datasets/tests/__init__.py
diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py
new file mode 100644
index 0000000..34614ca
--- /dev/null
+++ b/mne/datasets/tests/test_datasets.py
@@ -0,0 +1,46 @@
+from os import path as op
+from nose.tools import assert_true, assert_equal
+
+from mne import datasets
+from mne.externals.six import string_types
+from mne.utils import _TempDir, run_tests_if_main, requires_good_network
+
+
+def test_datasets():
+    """Test simple dataset functions
+    """
+    for dname in ('sample', 'somato', 'spm_face', 'testing',
+                  'bst_raw', 'bst_auditory', 'bst_resting'):
+        if dname.startswith('bst'):
+            dataset = getattr(datasets.brainstorm, dname)
+        else:
+            dataset = getattr(datasets, dname)
+        if dataset.data_path(download=False) != '':
+            assert_true(isinstance(dataset.get_version(), string_types))
+        else:
+            assert_true(dataset.get_version() is None)
+
+
+@requires_good_network
+def test_megsim():
+    """Test MEGSIM URL handling
+    """
+    data_dir = _TempDir()
+    paths = datasets.megsim.load_data(
+        'index', 'text', 'text', path=data_dir, update_path=False)
+    assert_equal(len(paths), 1)
+    assert_true(paths[0].endswith('index.html'))
+
+
+@requires_good_network
+def test_downloads():
+    """Test dataset URL handling
+    """
+    # Try actually downloading a dataset
+    data_dir = _TempDir()
+    path = datasets._fake.data_path(path=data_dir, update_path=False)
+    assert_true(op.isfile(op.join(path, 'bar')))
+    assert_true(datasets._fake.get_version() is None)
+
+
+run_tests_if_main()
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index b876730..b333b58 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -4,18 +4,20 @@
 #          Denis Engemann <denis.engemann at gmail.com>
 # License: BSD Style.
 
-from ..externals.six import string_types
 import os
 import os.path as op
 import shutil
 import tarfile
 from warnings import warn
+import stat
 
 from .. import __version__ as mne_version
 from ..utils import get_config, set_config, _fetch_file, logger
+from ..externals.six import string_types
+from ..externals.six.moves import input
 
 
-_doc = """Get path to local copy of {name} dataset
+_data_path_doc = """Get path to local copy of {name} dataset
 
     Parameters
     ----------
@@ -23,12 +25,12 @@ _doc = """Get path to local copy of {name} dataset
         Location of where to look for the {name} dataset.
         If None, the environment variable or config parameter
         {conf} is used. If it doesn't exist, the
-        "mne-python/examples" directory is used. If the sample dataset
+        "mne-python/examples" directory is used. If the {name} dataset
         is not found under the given path (e.g., as
         "mne-python/examples/MNE-{name}-data"), the data
         will be automatically downloaded to the specified folder.
     force_update : bool
-        Force update of the sample dataset even if a local copy exists.
+        Force update of the {name} dataset even if a local copy exists.
     update_path : bool | None
         If True, set the {conf} in mne-python
         config to the given path. If None, the user is prompted.
@@ -47,13 +49,41 @@ _doc = """Get path to local copy of {name} dataset
 """
 
 
+_version_doc = """Get version of the local {name} dataset
+
+    Returns
+    -------
+    version : str | None
+        Version of the {name} local dataset, or None if the dataset
+        does not exist locally.
+"""
+
+
+_bst_license_text = """
+License
+-------
+This tutorial dataset (EEG and MRI data) remains a property of the MEG Lab,
+McConnell Brain Imaging Center, Montreal Neurological Institute,
+McGill University, Canada. Its use and transfer outside the Brainstorm
+tutorial, e.g. for research purposes, is prohibited without written consent
+from the MEG Lab.
+
+If you reference this dataset in your publications, please:
+1) acknowledge its authors: Elizabeth Bock, Esther Florin, Francois Tadel and
+Sylvain Baillet
+2) cite Brainstorm as indicated on the website:
+http://neuroimage.usc.edu/brainstorm
+
+For questions, please contact Francois Tadel (francois.tadel at mcgill.ca).
+"""
+
+
 def _dataset_version(path, name):
     """Get the version of the dataset"""
     ver_fname = op.join(path, 'version.txt')
     if op.exists(ver_fname):
-        fid = open(ver_fname, 'r')
-        version = fid.readline().strip()  # version is on first line
-        fid.close()
+        with open(ver_fname, 'r') as fid:
+            version = fid.readline().strip()  # version is on first line
     else:
         # Sample dataset versioning was introduced after 0.3
         # SPM dataset was introduced with 0.7
@@ -62,69 +92,137 @@ def _dataset_version(path, name):
     return version
 
 
-def _data_path(path=None, force_update=False, update_path=True,
-               download=True, name=None, check_version=True, verbose=None):
+def _get_path(path, key, name):
+    """Helper to get a dataset path"""
+    if path is None:
+            # use an intelligent guess if it's not defined
+            def_path = op.realpath(op.join(op.dirname(__file__), '..', '..',
+                                           '..', 'examples'))
+            if get_config(key) is None:
+                key = 'MNE_DATA'
+            path = get_config(key, def_path)
+
+            # use the same for all datasets
+            if not op.exists(path) or not os.access(path, os.W_OK):
+                try:
+                    os.mkdir(path)
+                except OSError:
+                    try:
+                        logger.info('Checking for %s data in '
+                                    '"~/mne_data"...' % name)
+                        path = op.join(op.expanduser("~"), "mne_data")
+                        if not op.exists(path):
+                            logger.info("Trying to create "
+                                        "'~/mne_data' in home directory")
+                            os.mkdir(path)
+                    except OSError:
+                        raise OSError("User does not have write permissions "
+                                      "at '%s', try giving the path as an "
+                                      "argument to data_path() where user has "
+                                      "write permissions, for ex:data_path"
+                                      "('/home/xyz/me2/')" % (path))
+    if not isinstance(path, string_types):
+        raise ValueError('path must be a string or None')
+    return path
+
+
+def _do_path_update(path, update_path, key, name):
+    """Helper to update path"""
+    path = op.abspath(path)
+    if update_path is None:
+        if get_config(key, '') != path:
+            update_path = True
+            msg = ('Do you want to set the path:\n    %s\nas the default '
+                   '%s dataset path in the mne-python config [y]/n? '
+                   % (path, name))
+            answer = input(msg)
+            if answer.lower() == 'n':
+                update_path = False
+        else:
+            update_path = False
+
+    if update_path is True:
+        set_config(key, path)
+    return path
+
+
+def _data_path(path=None, force_update=False, update_path=True, download=True,
+               name=None, check_version=False, return_version=False,
+               archive_name=None):
     """Aux function
     """
     key = {'sample': 'MNE_DATASETS_SAMPLE_PATH',
            'spm': 'MNE_DATASETS_SPM_FACE_PATH',
-           'somato': 'MNE_DATASETS_SOMATO_PATH',}[name]
-
-    if path is None:
-        # use an intelligent guess if it's not defined
-        def_path = op.realpath(op.join(op.dirname(__file__),
-                                       '..', '..', 'examples'))
-
-        # backward compatibility
-        if get_config(key) is None:
-            key = 'MNE_DATA'
+           'somato': 'MNE_DATASETS_SOMATO_PATH',
+           'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
+           'testing': 'MNE_DATASETS_TESTING_PATH',
+           'fake': 'MNE_DATASETS_FAKE_PATH',
+           }[name]
 
-        path = get_config(key, def_path)
+    path = _get_path(path, key, name)
+    archive_names = dict(
+        sample='MNE-sample-data-processed.tar.gz',
+        spm='MNE-spm-face.tar.bz2',
+        somato='MNE-somato-data.tar.gz',
+        testing='mne-testing-data-master.tar.gz',
+        fake='foo.tgz',
+    )
+    if archive_name is not None:
+        archive_names.update(archive_name)
+    folder_names = dict(
+        sample='MNE-sample-data',
+        spm='MNE-spm-face',
+        somato='MNE-somato-data',
+        brainstorm='MNE-brainstorm-data',
+        testing='MNE-testing-data',
+        fake='foo',
+    )
+    urls = dict(
+        sample="https://s3.amazonaws.com/mne-python/datasets/%s",
+        spm='https://s3.amazonaws.com/mne-python/datasets/%s',
+        somato='https://s3.amazonaws.com/mne-python/datasets/%s',
+        brainstorm='https://copy.com/ZTHXXFcuIZycvRoA/brainstorm/%s',
+        testing='https://github.com/mne-tools/mne-testing-data/archive/'
+                'master.tar.gz',
+        fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
+             'datasets/%s',
+    )
+    hashes = dict(
+        sample='f73186795af820428e5e8e779ce5bfcf',
+        spm='3e9e83c642136e5b720e2ecc5dcc3244',
+        somato='f3e3a8441477bb5bacae1d0c6e0964fb',
+        brainstorm=None,
+        testing=None,
+        fake='3194e9f7b46039bb050a74f3e1ae9908',
+    )
+    folder_origs = dict(  # not listed means None
+        testing='mne-testing-data-master',
+    )
+    folder_name = folder_names[name]
+    archive_name = archive_names[name]
+    hash_ = hashes[name]
+    url = urls[name]
+    folder_orig = folder_origs.get(name, None)
+    if '%s' in url:
+        url = url % archive_name
 
-        # use the same for all datasets
-        if not op.exists(path) or not os.access(path, os.W_OK):
-            try:
-                os.mkdir(path)
-            except OSError:
-                try:
-                    logger.info("Checking for dataset in '~/mne_data'...")
-                    path = op.join(op.expanduser("~"), "mne_data")
-                    if not op.exists(path):
-                        logger.info("Trying to create "
-                                    "'~/mne_data' in home directory")
-                        os.mkdir(path)
-                except OSError:
-                    raise OSError("User does not have write permissions "
-                                  "at '%s', try giving the path as an argument "
-                                  "to data_path() where user has write "
-                                  "permissions, for ex:data_path"
-                                  "('/home/xyz/me2/')" % (path))
+    folder_path = op.join(path, folder_name)
+    if name == 'brainstorm':
+        extract_path = folder_path
+        folder_path = op.join(folder_path, archive_names[name].split('.')[0])
 
-    if not isinstance(path, string_types):
-        raise ValueError('path must be a string or None')
-    if name == 'sample':
-        archive_name = "MNE-sample-data-processed.tar.gz"
-        url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/" + archive_name
-        folder_name = "MNE-sample-data"
-        folder_path = op.join(path, folder_name)
-    elif name == 'spm':
-        archive_name = 'MNE-spm-face.tar.bz2'
-        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
-        folder_name = "MNE-spm-face"
-        folder_path = op.join(path, folder_name)
-    elif name == 'somato':
-        archive_name = 'MNE-somato-data.tar.gz'
-        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
-        folder_name = "MNE-somato-data"
-        folder_path = op.join(path, folder_name)
-    else:
-        raise ValueError('Sorry, the dataset "%s" is not known.' % name)
     rm_archive = False
     martinos_path = '/cluster/fusion/sample_data/' + archive_name
     neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
+
     if not op.exists(folder_path) and not download:
         return ''
     if not op.exists(folder_path) or force_update:
+        if name == 'brainstorm':
+            answer = input('%sAgree (y/[n])? ' % _bst_license_text)
+            if answer.lower() != 'y':
+                raise RuntimeError('You must agree to the license to use this '
+                                   'dataset')
         logger.info('Downloading or reinstalling '
                     'data archive %s at location %s' % (archive_name, path))
 
@@ -138,55 +236,65 @@ def _data_path(path=None, force_update=False, update_path=True,
             fetch_archive = True
             if op.exists(archive_name):
                 msg = ('Archive already exists. Overwrite it (y/[n])? ')
-                answer = raw_input(msg)
+                answer = input(msg)
                 if answer.lower() == 'y':
                     os.remove(archive_name)
                 else:
                     fetch_archive = False
 
             if fetch_archive:
-                _fetch_file(url, archive_name, print_destination=False)
+                _fetch_file(url, archive_name, print_destination=False,
+                            hash_=hash_)
 
         if op.exists(folder_path):
-            shutil.rmtree(folder_path)
+            def onerror(func, path, exc_info):
+                """Deal with access errors (e.g. testing dataset read-only)"""
+                # Is the error an access error ?
+                do = False
+                if not os.access(path, os.W_OK):
+                    perm = os.stat(path).st_mode | stat.S_IWUSR
+                    os.chmod(path, perm)
+                    do = True
+                if not os.access(op.dirname(path), os.W_OK):
+                    dir_perm = (os.stat(op.dirname(path)).st_mode |
+                                stat.S_IWUSR)
+                    os.chmod(op.dirname(path), dir_perm)
+                    do = True
+                if do:
+                    func(path)
+                else:
+                    raise
+            shutil.rmtree(folder_path, onerror=onerror)
 
-        logger.info('Decompressing the archive: ' + archive_name)
-        logger.info('... please be patient, this can take some time')
+        logger.info('Decompressing the archive: %s' % archive_name)
+        logger.info('(please be patient, this can take some time)')
         for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
             try:
-                tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
+                if name != 'brainstorm':
+                    extract_path = path
+                tf = tarfile.open(archive_name, 'r:%s' % ext)
+                tf.extractall(path=extract_path)
+                tf.close()
                 break
             except tarfile.ReadError as err:
                 logger.info('%s is %s trying "bz2"' % (archive_name, err))
+        if folder_orig is not None:
+            shutil.move(op.join(path, folder_orig), folder_path)
 
         if rm_archive:
             os.remove(archive_name)
 
-    path = op.abspath(path)
-    if update_path is None:
-        if get_config(key, '') != path:
-            update_path = True
-            msg = ('Do you want to set the path:\n    %s\nas the default '
-                   'sample dataset path in the mne-python config [y]/n? '
-                   % path)
-            answer = raw_input(msg)
-            if answer.lower() == 'n':
-                update_path = False
-        else:
-            update_path = False
-
-    if update_path is True:
-        set_config(key, path)
-
+    path = _do_path_update(path, update_path, key, name)
     path = op.join(path, folder_name)
 
-    # compare the version of the Sample dataset and mne
+    # compare the version of the dataset and mne
     data_version = _dataset_version(path, name)
     try:
         from distutils.version import LooseVersion as LV
     except:
-        warn('Could not determine sample dataset version; dataset could\n'
-             'be out of date. Please install the "distutils" package.')
+        warn('Could not determine %s dataset version; dataset could\n'
+             'be out of date. Please install the "distutils" package.'
+             % name)
     else:  # 0.7 < 0.7.git should be False, therefore strip
         if check_version and LV(data_version) < LV(mne_version.strip('.git')):
             warn('The {name} dataset (version {current}) is older than '
@@ -194,14 +302,28 @@ def _data_path(path=None, force_update=False, update_path=True,
                  'you may need to update the {name} dataset by using '
                  'mne.datasets.{name}.data_path(force_update=True)'.format(
                      name=name, current=data_version, newest=mne_version))
+    return (path, data_version) if return_version else path
 
-    return path
+
+def _get_version(name):
+    """Helper to get a dataset version"""
+    if not has_dataset(name):
+        return None
+    return _data_path(name=name, return_version=True)[1]
 
 
 def has_dataset(name):
-    """Helper for sample dataset presence"""
+    """Helper for dataset presence"""
     endswith = {'sample': 'MNE-sample-data',
                 'spm': 'MNE-spm-face',
-                'somato': 'MNE-somato-data'}[name]
-    dp = _data_path(download=False, name=name, check_version=False)
+                'somato': 'MNE-somato-data',
+                'testing': 'MNE-testing-data',
+                'fake': 'foo',
+                'brainstorm': 'MNE-brainstorm-data',
+                }[name]
+    archive_name = None
+    if name == 'brainstorm':
+        archive_name = dict(brainstorm='bst_raw')
+    dp = _data_path(download=False, name=name, check_version=False,
+                    archive_name=archive_name)
     return dp.endswith(endswith)
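The _get_path/_do_path_update split means every dataset resolves and
persists its location the same way, keyed on a config value. A sketch of
pinning a dataset location up front so no interactive prompt is needed
(the directory is illustrative):

    from mne.utils import get_config, set_config

    set_config('MNE_DATASETS_SAMPLE_PATH', '/data/mne_datasets')
    print(get_config('MNE_DATASETS_SAMPLE_PATH'))
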
diff --git a/mne/decoding/__init__.py b/mne/decoding/__init__.py
index b0cb320..d0f4e47 100644
--- a/mne/decoding/__init__.py
+++ b/mne/decoding/__init__.py
@@ -1,6 +1,7 @@
-from .classifier import Scaler, FilterEstimator
-from .classifier import PSDEstimator, ConcatenateChannels
+from .transformer import Scaler, FilterEstimator
+from .transformer import PSDEstimator, EpochsVectorizer, ConcatenateChannels
 from .mixin import TransformerMixin
+from .base import BaseEstimator, LinearModel
 from .csp import CSP
 from .ems import compute_ems
-from .time_gen import time_generalization
+from .time_gen import GeneralizationAcrossTime, TimeDecoding
diff --git a/mne/decoding/base.py b/mne/decoding/base.py
new file mode 100644
index 0000000..8f20732
--- /dev/null
+++ b/mne/decoding/base.py
@@ -0,0 +1,622 @@
+"""Base class copy from sklearn.base"""
+# Authors: Gael Varoquaux <gael.varoquaux at normalesup.org>
+#          Romain Trachel <trachelr at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License: BSD (3-clause)
+
+import inspect
+import warnings
+import six
+import numpy as np
+
+
+class BaseEstimator(object):
+    """Base class for all estimators in scikit-learn
+    Notes
+    -----
+    All estimators should specify all the parameters that can be set
+    at the class level in their ``__init__`` as explicit keyword
+    arguments (no ``*args`` or ``**kwargs``).
+    """
+
+    @classmethod
+    def _get_param_names(cls):
+        """Get parameter names for the estimator"""
+        # fetch the constructor or the original constructor before
+        # deprecation wrapping if any
+        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
+        if init is object.__init__:
+            # No explicit constructor to introspect
+            return []
+
+        # introspect the constructor arguments to find the model parameters
+        # to represent
+        args, varargs, kw, default = inspect.getargspec(init)
+        if varargs is not None:
+            raise RuntimeError("scikit-learn estimators should always "
+                               "specify their parameters in the signature"
+                               " of their __init__ (no varargs)."
+                               " %s doesn't follow this convention."
+                               % (cls, ))
+        # Remove 'self'
+        # XXX: This is going to fail if the init is a staticmethod, but
+        # who would do this?
+        args.pop(0)
+        args.sort()
+        return args
+
+    def get_params(self, deep=True):
+        """Get parameters for this estimator.
+
+        Parameters
+        ----------
+        deep : boolean, optional
+            If True, will return the parameters for this estimator and
+            contained subobjects that are estimators.
+
+        Returns
+        -------
+        params : mapping of string to any
+            Parameter names mapped to their values.
+        """
+        out = dict()
+        for key in self._get_param_names():
+            # We need deprecation warnings to always be on in order to
+            # catch deprecated param values.
+            # This is set in utils/__init__.py but it gets overwritten
+            # when running under python3 somehow.
+            warnings.simplefilter("always", DeprecationWarning)
+            try:
+                with warnings.catch_warnings(record=True) as w:
+                    value = getattr(self, key, None)
+                if len(w) and w[0].category == DeprecationWarning:
+                    # if the parameter is deprecated, don't show it
+                    continue
+            finally:
+                warnings.filters.pop(0)
+
+            # XXX: should we rather test if instance of estimator?
+            if deep and hasattr(value, 'get_params'):
+                deep_items = value.get_params().items()
+                out.update((key + '__' + k, val) for k, val in deep_items)
+            out[key] = value
+        return out
+
+    def set_params(self, **params):
+        """Set the parameters of this estimator.
+        The method works on simple estimators as well as on nested objects
+        (such as pipelines). The former have parameters of the form
+        ``<component>__<parameter>`` so that it's possible to update each
+        component of a nested object.
+        Returns
+        -------
+        self
+        """
+        if not params:
+            # Simple optimisation to gain speed (inspect is slow)
+            return self
+        valid_params = self.get_params(deep=True)
+        for key, value in six.iteritems(params):
+            split = key.split('__', 1)
+            if len(split) > 1:
+                # nested objects case
+                name, sub_name = split
+                if name not in valid_params:
+                    raise ValueError('Invalid parameter %s for estimator %s. '
+                                     'Check the list of available parameters '
+                                     'with `estimator.get_params().keys()`.' %
+                                     (name, self))
+                sub_object = valid_params[name]
+                sub_object.set_params(**{sub_name: value})
+            else:
+                # simple objects case
+                if key not in valid_params:
+                    raise ValueError('Invalid parameter %s for estimator %s. '
+                                     'Check the list of available parameters '
+                                     'with `estimator.get_params().keys()`.' %
+                                     (key, self.__class__.__name__))
+                setattr(self, key, value)
+        return self
+
+    def __repr__(self):
+        class_name = self.__class__.__name__
+        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
+                                               offset=len(class_name),),)
+
+
+###############################################################################
+def _pprint(params, offset=0, printer=repr):
+    """Pretty print the dictionary 'params'
+
+    Parameters
+    ----------
+    params: dict
+        The dictionary to pretty print
+    offset: int
+        The offset in characters to add at the begin of each line.
+    printer:
+        The function to convert entries to strings, typically
+        the builtin str or repr
+
+    """
+    # Do a multi-line justified repr:
+    options = np.get_printoptions()
+    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
+    params_list = list()
+    this_line_length = offset
+    line_sep = ',\n' + (1 + offset // 2) * ' '
+    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
+        if type(v) is float:
+            # use str for representing floating point numbers
+            # this way we get consistent representation across
+            # architectures and versions.
+            this_repr = '%s=%s' % (k, str(v))
+        else:
+            # use repr of the rest
+            this_repr = '%s=%s' % (k, printer(v))
+        if len(this_repr) > 500:
+            this_repr = this_repr[:300] + '...' + this_repr[-100:]
+        if i > 0:
+            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
+                params_list.append(line_sep)
+                this_line_length = len(line_sep)
+            else:
+                params_list.append(', ')
+                this_line_length += 2
+        params_list.append(this_repr)
+        this_line_length += len(this_repr)
+
+    np.set_printoptions(**options)
+    lines = ''.join(params_list)
+    # Strip trailing space to avoid nightmare in doctests
+    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
+    return lines
+
+
+class LinearModel(BaseEstimator):
+    """
+    This object clones a Linear Model from scikit-learn
+    and updates the attribute for each fit. The linear model coefficient
+    (filters) are used to extract discriminant neural sources from
+    the measured data. This class implement the computation of patterns
+    which provides neurophysiologically interpretable information [1],
+    in the sense that significant nonzero weights are only observed at channels
+    the activity of which is related to discriminant neural sources.
+
+    Parameters
+    ----------
+    model : object | None
+        A linear model from scikit-learn with a fit method
+        that updates a coef_ attribute.
+        If None the model will be a LogisticRegression
+
+    Attributes
+    ----------
+    filters_ : ndarray
+        If fit, the filters used to decompose the data, else None.
+    patterns_ : ndarray
+        If fit, the patterns used to restore M/EEG signals, else None.
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    See Also
+    --------
+    ICA
+    CSP
+    Xdawn
+
+    References
+    ----------
+    [1] Haufe, S., Meinecke, F., Gorgen, K., Dahne, S., Haynes, J.-D.,
+    Blankertz, B., & Biebmann, F. (2014). On the interpretation of
+    weight vectors of linear models in multivariate neuroimaging.
+    NeuroImage, 87, 96-110.
+    """
+    def __init__(self, model=None):
+        if model is None:
+            from sklearn.linear_model import LogisticRegression
+            model = LogisticRegression()
+
+        self.model = model
+        self.patterns_ = None
+        self.filters_ = None
+
+    def fit(self, X, y):
+        """Estimate the coefficient of the linear model.
+        Save the coefficient in the attribute filters_ and
+        computes the attribute patterns_ using [1].
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to estimate the coeffiscient.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        self : instance of LinearModel
+            Returns the modified instance.
+
+        References
+        ----------
+        """
+        # fit the model
+        self.model.fit(X, y)
+        assert hasattr(self.model, 'coef_'), \
+            "model needs a coef_ attribute to compute the patterns"
+        self.patterns_ = np.dot(X.T, np.dot(X, self.model.coef_.T))
+        self.filters_ = self.model.coef_
+
+        return self
+
+    def transform(self, X, y=None):
+        """Transform the data using the linear model.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to transform.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        y_pred : array, shape (n_epochs,)
+            Predicted class label per epoch.
+
+        """
+        return self.model.transform(X)
+
+    def fit_transform(self, X, y):
+        """fit the data and transform it using the linear model.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to transform.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        y_pred : array, shape (n_epochs,)
+            Predicted class label per epoch.
+
+        """
+        return self.fit(X, y).transform(X)
+
+    def predict(self, X):
+        """Computes prediction of X.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data used to compute prediction.
+
+        Returns
+        -------
+        y_pred : array, shape (n_epochs,)
+            The predictions.
+        """
+        return self.model.predict(X)
+
+    def score(self, X, y):
+        """
+        Returns the score of the linear model computed
+        on the given test data.
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_features)
+            The data to transform.
+        y : array, shape (n_epochs,)
+            The class for each epoch.
+
+        Returns
+        -------
+        score : float
+            Score of the linear model.
+
+        """
+        return self.model.score(X, y)
+
+    def plot_patterns(self, info, times=None, ch_type=None, layout=None,
+                      vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                      colorbar=True, scale=None, scale_time=1e3, unit='a.u.',
+                      res=64, size=1, cbar_fmt='%3.1f',
+                      name_format='%01d ms', proj=False, show=True,
+                      show_names=False, title=None, mask=None,
+                      mask_params=None, outlines='head', contours=6,
+                      image_interp='bilinear', average=None, head_pos=None):
+        """
+        Plot topographic patterns of the linear model.
+        The patterns explain how the measured data was generated
+        from the neural sources (a.k.a. the forward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit the linear model.
+            If not possible, consider using ``create_info``.
+        times : float | array of floats | None.
+            The time point(s) to plot. If None, the number of ``axes``
+            determines the amount of time point(s). If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "%01d ms".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+
+        if times is None:
+            tmin = 0
+        else:
+            tmin = times[0]
+
+        # create an evoked
+        patterns = EvokedArray(self.patterns_.reshape(info['nchan'], -1),
+                               info, tmin=tmin)
+        # then call plot_topomap
+        return patterns.plot_topomap(times=times, ch_type=ch_type,
+                                     layout=layout, vmin=vmin, vmax=vmax,
+                                     cmap=cmap, colorbar=colorbar, res=res,
+                                     cbar_fmt=cbar_fmt, sensors=sensors,
+                                     scale=scale, scale_time=scale_time,
+                                     time_format=name_format, size=size,
+                                     show_names=show_names, unit=unit,
+                                     mask_params=mask_params,
+                                     mask=mask, outlines=outlines,
+                                     contours=contours, title=title,
+                                     image_interp=image_interp, show=show,
+                                     head_pos=head_pos)
+
+    def plot_filters(self, info, times=None, ch_type=None, layout=None,
+                     vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                     colorbar=True, scale=None, scale_time=1e3, unit='a.u.',
+                     res=64, size=1, cbar_fmt='%3.1f',
+                     name_format='%01d ms', proj=False, show=True,
+                     show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear', average=None, head_pos=None):
+        """
+        Plot topographic filters of the linear model.
+        The filters are used to extract discriminant neural sources from
+        the measured data (a.k.a. the backward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit the linear model.
+            If not possible, consider using ``create_info``.
+        times : float | array of floats | None.
+            The time point(s) to plot. If None, the number of ``axes``
+            determines the amount of time point(s). If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1e3.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "%01d ms".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+
+        if times is None:
+            tmin = 0
+        else:
+            tmin = times[0]
+
+        # create an evoked
+        filters = EvokedArray(self.filters_.T.reshape(info['nchan'], -1),
+                              info, tmin=tmin)
+        # then call plot_topomap
+        return filters.plot_topomap(times=times, ch_type=ch_type,
+                                    layout=layout, vmin=vmin, vmax=vmax,
+                                    cmap=cmap, colorbar=colorbar, res=res,
+                                    cbar_fmt=cbar_fmt, sensors=sensors,
+                                    scale=scale, scale_time=scale_time,
+                                    time_format=name_format, size=size,
+                                    show_names=show_names, unit=unit,
+                                    mask_params=mask_params,
+                                    mask=mask, outlines=outlines,
+                                    contours=contours, title=title,
+                                    image_interp=image_interp, show=show,
+                                    head_pos=head_pos)
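For orientation, a minimal usage sketch of the new LinearModel class (a
sketch, assuming LinearModel is importable from mne.decoding and that
scikit-learn is installed; X and y below are hypothetical data):

    import numpy as np
    from mne.decoding import LinearModel

    X = np.random.randn(50, 10)      # 50 epochs, 10 features (hypothetical)
    y = np.random.randint(0, 2, 50)  # binary class labels
    lm = LinearModel()               # defaults to LogisticRegression
    lm.fit(X, y)
    print(lm.filters_.shape)         # backward model (filters), (1, 10)
    print(lm.patterns_.shape)        # forward model (patterns), (10, 1)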
diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py
index d84334e..0ff5eaa 100644
--- a/mne/decoding/csp.py
+++ b/mne/decoding/csp.py
@@ -1,17 +1,21 @@
-# Authors: Romain Trachel <romain.trachel at inria.fr>
+# Authors: Romain Trachel <trachelr at gmail.com>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Alexandre Barachant <alexandre.barachant at gmail.com>
 #
 # License: BSD (3-clause)
 
+import copy as cp
+import warnings
+
 import numpy as np
 from scipy import linalg
-from distutils.version import LooseVersion
 
 from .mixin import TransformerMixin
+from ..cov import _regularized_covariance
 
 
 class CSP(TransformerMixin):
-    """M/EEG signal decomposition using the Common Spatial Patterns (CSP)
+    """M/EEG signal decomposition using the Common Spatial Patterns (CSP).
 
     This object can be used as a supervised decomposition to estimate
     spatial filters for feature extraction in a 2 class decoding problem.
@@ -19,35 +23,44 @@ class CSP(TransformerMixin):
 
     Parameters
     ----------
-    n_components : int, default 4
+    n_components : int (default 4)
         The number of components to decompose M/EEG signals.
         This number should be set by cross-validation.
-    reg : float, str, None
+    reg : float | str | None (default None)
         if not None, allow regularization for covariance estimation
         if float, shrinkage covariance is used (0 <= shrinkage <= 1).
-        if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('lws') or
-                Oracle Approximating Shrinkage ('oas')
-    log : bool
+        if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
+        or Oracle Approximating Shrinkage ('oas').
+    log : bool (default True)
         If true, apply log to standardize the features.
         If false, features are just z-scored.
 
     Attributes
     ----------
-    `filters_` : ndarray
+    filters_ : ndarray, shape (n_channels, n_channels)
         If fit, the CSP components used to decompose the data, else None.
-    `patterns_` : ndarray
+    patterns_ : ndarray, shape (n_channels, n_channels)
         If fit, the CSP patterns used to restore M/EEG signals, else None.
-    `mean_` : ndarray
+    mean_ : ndarray, shape (n_channels,)
         If fit, the mean squared power for each component.
-    `std_` : ndarray
+    std_ : ndarray, shape (n_channels,)
         If fit, the std squared power for each component.
 
+    References
+    ----------
     [1] Zoltan J. Koles. The quantitative extraction and topographic mapping
     of the abnormal components in the clinical EEG. Electroencephalography
     and Clinical Neurophysiology, 79(6):440--447, December 1991.
     """
+
     def __init__(self, n_components=4, reg=None, log=True):
+        """Init of CSP."""
         self.n_components = n_components
+        if reg == 'lws':
+            warnings.warn('`lws` has been deprecated for the `reg`'
+                          ' argument. It will be removed in 0.11.'
+                          ' Use `ledoit_wolf` instead.', DeprecationWarning)
+            reg = 'ledoit_wolf'
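+            # e.g. CSP(reg='lws') now warns with DeprecationWarning and
+            # then behaves exactly like CSP(reg='ledoit_wolf')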
         self.reg = reg
         self.log = log
         self.filters_ = None
@@ -60,9 +73,9 @@ class CSP(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : ndarray, shape (n_epochs, n_channels, n_times)
             The data to estimate the CSP on.
-        y : array
+        y : array, shape (n_epochs,)
             The class for each epoch.
 
         Returns
@@ -74,6 +87,9 @@ class CSP(TransformerMixin):
             raise ValueError("epochs_data should be of type ndarray (got %s)."
                              % type(epochs_data))
         epochs_data = np.atleast_3d(epochs_data)
+        # check number of epochs
+        if epochs_data.shape[0] != len(y):
+            raise ValueError("n_epochs must be the same for epochs_data and y")
         classes = np.unique(y)
         if len(classes) != 2:
             raise ValueError("More than two different classes in the data.")
@@ -82,60 +98,9 @@ class CSP(TransformerMixin):
                                [1, 0, 2]).reshape(epochs_data.shape[1], -1)
         class_2 = np.transpose(epochs_data[y == classes[1]],
                                [1, 0, 2]).reshape(epochs_data.shape[1], -1)
-        if self.reg is None:
-            # compute empirical covariance
-            cov_1 = np.dot(class_1, class_1.T)
-            cov_2 = np.dot(class_2, class_2.T)
-        else:
-            # use sklearn covariance estimators
-            if isinstance(self.reg, float):
-                if (self.reg < 0) or (self.reg > 1):
-                    raise ValueError('0 <= shrinkage <= 1 for '
-                                     'covariance regularization.')
-                try:
-                    import sklearn
-                    sklearn_version = LooseVersion(sklearn.__version__)
-                    from sklearn.covariance import ShrunkCovariance
-                except ImportError:
-                    raise Exception('the scikit-learn package is missing and '
-                                    'required for covariance regularization.')
-                if sklearn_version < '0.12':
-                    skl_cov = ShrunkCovariance(shrinkage=self.reg,
-                                               store_precision=False)
-                else:
-                    # init sklearn.covariance.ShrunkCovariance estimator
-                    skl_cov = ShrunkCovariance(shrinkage=self.reg,
-                                               store_precision=False,
-                                               assume_centered=True)
-            elif isinstance(self.reg, str):
-                if self.reg == 'lws':
-                    try:
-                        from sklearn.covariance import LedoitWolf
-                    except ImportError:
-                        raise Exception('the scikit-learn package is missing '
-                                        'and required for regularization.')
-                    # init sklearn.covariance.LedoitWolf estimator
-                    skl_cov = LedoitWolf(store_precision=False,
-                                         assume_centered=True)
-                elif self.reg == 'oas':
-                    try:
-                        from sklearn.covariance import OAS
-                    except ImportError:
-                        raise Exception('the scikit-learn package is missing '
-                                        'and required for regularization.')
-                    # init sklearn.covariance.OAS estimator
-                    skl_cov = OAS(store_precision=False,
-                                  assume_centered=True)
-                else:
-                    raise ValueError("regularization parameter should be "
-                                     "of type str (got %s)." % type(self.reg))
-            else:
-                raise ValueError("regularization parameter should be "
-                                 "of type str (got %s)." % type(self.reg))
-
-            # compute regularized covariance using sklearn
-            cov_1 = skl_cov.fit(class_1.T).covariance_
-            cov_2 = skl_cov.fit(class_2.T).covariance_
+
+        cov_1 = _regularized_covariance(class_1, reg=self.reg)
+        cov_2 = _regularized_covariance(class_2, reg=self.reg)
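+        # per the reg docstring above: reg=None keeps the empirical
+        # covariance, a float in [0, 1] applies shrinkage, and
+        # 'ledoit_wolf' / 'oas' select the corresponding scikit-learn
+        # estimators inside _regularized_covariance (from ..cov)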
 
         # then fit on covariance
         self._fit(cov_1, cov_2)
@@ -153,8 +118,7 @@ class CSP(TransformerMixin):
         return self
 
     def _fit(self, cov_a, cov_b):
-        """Aux Function (modifies cov_a and cov_b in-place)"""
-
+        """Aux Function (modifies cov_a and cov_b in-place)."""
         cov_a /= np.trace(cov_a)
         cov_b /= np.trace(cov_b)
         # computes the eigen values
@@ -181,12 +145,15 @@ class CSP(TransformerMixin):
         self.patterns_ = linalg.pinv(w).T
 
     def transform(self, epochs_data, y=None):
-        """Estimate epochs sources given the CSP filters
+        """Estimate epochs sources given the CSP filters.
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
+        y : None
+            Not used.
+
         Returns
         -------
         X : ndarray of shape (n_epochs, n_sources)
@@ -210,3 +177,291 @@ class CSP(TransformerMixin):
             X -= self.mean_
             X /= self.std_
         return X
+
+    def plot_patterns(self, info, components=None, ch_type=None, layout=None,
+                      vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                      colorbar=True, scale=None, scale_time=1, unit=None,
+                      res=64, size=1, cbar_fmt='%3.1f',
+                      name_format='CSP%01d', proj=False, show=True,
+                      show_names=False, title=None, mask=None,
+                      mask_params=None, outlines='head', contours=6,
+                      image_interp='bilinear', average=None, head_pos=None):
+        """Plot topographic patterns of CSP components.
+
+        The CSP patterns explain how the measured data was generated
+        from the neural sources (a.k.a. the forward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit CSP.
+            If not possible, consider using ``create_info``.
+        components : int | array of ints | None
+           The CSP patterns to plot. If None, all n_components
+           patterns are shown.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "CSP%01d".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+        if components is None:
+            components = np.arange(self.n_components)
+
+        # set sampling frequency to have 1 component per time point
+        info = cp.deepcopy(info)
+        info['sfreq'] = 1.
+        # create an evoked
+        patterns = EvokedArray(self.patterns_.T, info, tmin=0)
+        # then call plot_topomap
+        return patterns.plot_topomap(times=components, ch_type=ch_type,
+                                     layout=layout, vmin=vmin, vmax=vmax,
+                                     cmap=cmap, colorbar=colorbar, res=res,
+                                     cbar_fmt=cbar_fmt, sensors=sensors,
+                                     scale=1, scale_time=1, unit='a.u.',
+                                     time_format=name_format, size=size,
+                                     show_names=show_names,
+                                     mask_params=mask_params,
+                                     mask=mask, outlines=outlines,
+                                     contours=contours,
+                                     image_interp=image_interp, show=show,
+                                     head_pos=head_pos)
+
+    def plot_filters(self, info, components=None, ch_type=None, layout=None,
+                     vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
+                     colorbar=True, scale=None, scale_time=1, unit=None,
+                     res=64, size=1, cbar_fmt='%3.1f',
+                     name_format='CSP%01d', proj=False, show=True,
+                     show_names=False, title=None, mask=None,
+                     mask_params=None, outlines='head', contours=6,
+                     image_interp='bilinear', average=None, head_pos=None):
+        """Plot topographic filters of CSP components.
+
+        The CSP filters are used to extract discriminant neural sources from
+        the measured data (a.k.a. the backward model).
+
+        Parameters
+        ----------
+        info : instance of Info
+            Info dictionary of the epochs used to fit CSP.
+            If not possible, consider using ``create_info``.
+        components : int | array of ints | None
+           The CSP filters to plot. If None, all n_components
+           filters are shown.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to be
+            specified for Neuromag data). If possible, the correct layout file
+            is inferred from the data; if no appropriate layout file was found
+            the layout is automatically generated from the sensor locations.
+        vmin : float | callable
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If vmin is None,
+            but vmax is not, defaults to np.min(data).
+            If callable, the output equals vmax(data).
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        sensors : bool | str
+            Add markers for sensor locations to the plot. Accepts matplotlib
+            plot format string (e.g., 'r+' for red plusses). If True,
+            a circle will be used (via .add_artist). Defaults to True.
+        colorbar : bool
+            Plot a colorbar.
+        scale : dict | float | None
+            Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
+            for grad and 1e15 for mag.
+        scale_time : float | None
+            Scale the time labels. Defaults to 1.
+        unit : dict | str | None
+            The unit of the channel type used for colorbar label. If
+            scale is None the unit is automatically determined.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
+        size : float
+            Side length per topomap in inches.
+        cbar_fmt : str
+            String format for colorbar values.
+        name_format : str
+            String format for topomap values. Defaults to "CSP%01d".
+        proj : bool | 'interactive'
+            If True, SSP projections are applied before display.
+            If 'interactive', a check box for reversible selection
+            of SSP projection vectors will be shown.
+        show : bool
+            Show figure if True.
+        show_names : bool | callable
+            If True, show channel names on top of the map. If a callable is
+            passed, channel names will be formatted using the callable; e.g.,
+            to delete the prefix 'MEG ' from all channel names, pass the
+            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+            only significant sensors will be shown.
+        title : str | None
+            Title. If None (default), no title is displayed.
+        mask : ndarray of bool, shape (n_channels, n_times) | None
+            The channels to be marked as significant at a given time point.
+            Indices set to `True` will be considered. Defaults to None.
+        mask_params : dict | None
+            Additional plotting parameters for plotting significant sensors.
+            Default (None) equals::
+
+                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                     linewidth=0, markersize=4)
+
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw.
+            If 0, no contours will be drawn.
+        image_interp : str
+            The image interpolation to be used.
+            All matplotlib options are accepted.
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into a window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head
+            should be relative to the electrode locations.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+           The figure.
+        """
+
+        from .. import EvokedArray
+        if components is None:
+            components = np.arange(self.n_components)
+
+        # set sampling frequency to have 1 component per time point
+        info = cp.deepcopy(info)
+        info['sfreq'] = 1.
+        # create an evoked
+        filters = EvokedArray(self.filters_, info, tmin=0)
+        # then call plot_topomap
+        return filters.plot_topomap(times=components, ch_type=ch_type,
+                                    layout=layout, vmin=vmin, vmax=vmax,
+                                    cmap=cmap, colorbar=colorbar, res=res,
+                                    cbar_fmt=cbar_fmt, sensors=sensors,
+                                    scale=1, scale_time=1, unit='a.u.',
+                                    time_format=name_format, size=size,
+                                    show_names=show_names,
+                                    mask_params=mask_params,
+                                    mask=mask, outlines=outlines,
+                                    contours=contours,
+                                    image_interp=image_interp, show=show,
+                                    head_pos=head_pos)
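A short end-to-end sketch of the updated CSP API (hypothetical random data
stands in for real epochs; reg='ledoit_wolf' exercises the renamed
regularizer and requires scikit-learn):

    import numpy as np
    from mne.decoding import CSP

    epochs_data = np.random.randn(40, 8, 100)  # epochs x channels x times
    labels = np.repeat([0, 1], 20)              # two classes
    csp = CSP(n_components=4, reg='ledoit_wolf')
    features = csp.fit_transform(epochs_data, labels)
    print(features.shape)                       # (40, 4) log-power features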
diff --git a/mne/decoding/ems.py b/mne/decoding/ems.py
index 7e74b9f..d41cdbc 100644
--- a/mne/decoding/ems.py
+++ b/mne/decoding/ems.py
@@ -12,7 +12,7 @@ from .. import pick_types, pick_info
 
 
 @verbose
-def compute_ems(epochs, conditions=None, picks=None, verbose=None, n_jobs=1):
+def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None):
     """Compute event-matched spatial filter on epochs
 
     This version operates on the entire time course. No time window needs to
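Since this reordering moves ``n_jobs`` ahead of ``verbose``, callers passing
those two arguments positionally would silently swap them; a safe,
keyword-based call looks like this (a sketch; the condition names are
hypothetical and must match the epochs' event_id):

    from mne.decoding import compute_ems

    out = compute_ems(epochs, conditions=['aud_l', 'vis_l'], n_jobs=1)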
diff --git a/mne/decoding/tests/test_csp.py b/mne/decoding/tests/test_csp.py
index 11bcb3d..6478567 100644
--- a/mne/decoding/tests/test_csp.py
+++ b/mne/decoding/tests/test_csp.py
@@ -1,5 +1,5 @@
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#         Romain Trachel <romain.trachel at inria.fr>
+#         Romain Trachel <trachelr at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -11,9 +11,7 @@ from numpy.testing import assert_array_almost_equal
 
 from mne import io, Epochs, read_events, pick_types
 from mne.decoding.csp import CSP
-from mne.utils import _TempDir, requires_sklearn
-
-tempdir = _TempDir()
+from mne.utils import requires_sklearn, slow_test
 
 data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
@@ -21,18 +19,19 @@ event_name = op.join(data_dir, 'test-eve.fif')
 
 tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
-start, stop = 0, 8  # if stop is too small pca may fail in some cases, but
-                    # we're okay on this file
+# if stop is too small pca may fail in some cases, but we're okay on this file
+start, stop = 0, 8
 
 
+@slow_test
 def test_csp():
     """Test Common Spatial Patterns algorithm on epochs
     """
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
-    picks = picks[1:13:3]
+                       eog=False, exclude='bads')
+    picks = picks[2:9:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
     epochs_data = epochs.get_data()
@@ -42,6 +41,7 @@ def test_csp():
     csp = CSP(n_components=n_components)
 
     csp.fit(epochs_data, epochs.events[:, -1])
+
     y = epochs.events[:, -1]
     X = csp.fit_transform(epochs_data, y)
     assert_true(csp.filters_.shape == (n_channels, n_channels))
@@ -59,6 +59,17 @@ def test_csp():
     sources = csp.transform(epochs_data)
     assert_true(sources.shape[1] == n_components)
 
+    epochs.pick_types(meg='mag', copy=False)
+
+    # test plot patterns
+    components = np.arange(n_components)
+    csp.plot_patterns(epochs.info, components=components, res=12,
+                      show=False)
+
+    # test plot filters
+    csp.plot_filters(epochs.info, components=components, res=12,
+                     show=False)
+
 
 @requires_sklearn
 def test_regularized_csp():
@@ -67,7 +78,7 @@ def test_regularized_csp():
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
@@ -75,7 +86,7 @@ def test_regularized_csp():
     n_channels = epochs_data.shape[1]
 
     n_components = 3
-    reg_cov = [None, 0.05, 'lws', 'oas']
+    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
     for reg in reg_cov:
         csp = CSP(n_components=n_components, reg=reg)
         csp.fit(epochs_data, epochs.events[:, -1])
diff --git a/mne/decoding/tests/test_ems.py b/mne/decoding/tests/test_ems.py
index 386de4a..e3abce6 100644
--- a/mne/decoding/tests/test_ems.py
+++ b/mne/decoding/tests/test_ems.py
@@ -7,11 +7,9 @@ import os.path as op
 from nose.tools import assert_equal, assert_raises
 
 from mne import io, Epochs, read_events, pick_types
-from mne.utils import _TempDir, requires_sklearn
+from mne.utils import requires_sklearn
 from mne.decoding import compute_ems
 
-tempdir = _TempDir()
-
 data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 curdir = op.join(op.dirname(__file__))
 
@@ -31,7 +29,7 @@ def test_ems():
     events = read_events(event_name)
     events[-2, 2] = 3
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
diff --git a/mne/decoding/tests/test_time_gen.py b/mne/decoding/tests/test_time_gen.py
index d84e42b..4fe1b0c 100644
--- a/mne/decoding/tests/test_time_gen.py
+++ b/mne/decoding/tests/test_time_gen.py
@@ -1,17 +1,20 @@
-# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Jean-Remi King <jeanremi.king at gmail.com>
 #
 # License: BSD (3-clause)
-
 import warnings
+import copy
 import os.path as op
 
-from nose.tools import assert_true
+from nose.tools import assert_equal, assert_true, assert_raises
+import numpy as np
+from numpy.testing import assert_array_equal
 
 from mne import io, Epochs, read_events, pick_types
-from mne.utils import _TempDir, requires_sklearn
-from mne.decoding import time_generalization
+from mne.utils import (requires_sklearn, requires_sklearn_0_15, slow_test,
+                       run_tests_if_main)
+from mne.decoding import GeneralizationAcrossTime, TimeDecoding
 
-tempdir = _TempDir()
 
 data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
@@ -19,26 +22,288 @@ event_name = op.join(data_dir, 'test-eve.fif')
 
 tmin, tmax = -0.2, 0.5
 event_id = dict(aud_l=1, vis_l=3)
+event_id_gen = dict(aud_l=2, vis_l=4)
 
 
-@requires_sklearn
-def test_time_generalization():
-    """Test time generalization decoding
-    """
+def make_epochs():
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
-                            eog=False, exclude='bads')
-    picks = picks[1:13:3]
+                       eog=False, exclude='bads')
+    picks = picks[0:2]
     decim = 30
 
-    with warnings.catch_warnings(record=True) as w:
+    # Test on time generalization within one condition
+    with warnings.catch_warnings(record=True):
         epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), preload=True, decim=decim)
+    return epochs
+
+
+@slow_test
+@requires_sklearn_0_15
+def test_generalization_across_time():
+    """Test time generalization decoding
+    """
+    from sklearn.svm import SVC
+    from sklearn.linear_model import RANSACRegressor, LinearRegression
+    from sklearn.preprocessing import LabelEncoder
+    from sklearn.metrics import mean_squared_error
+    from sklearn.cross_validation import LeaveOneLabelOut
+
+    epochs = make_epochs()
+
+    # Test default running
+    gat = GeneralizationAcrossTime(picks='foo')
+    assert_equal("<GAT | no fit, no prediction, no score>", "%s" % gat)
+    assert_raises(ValueError, gat.fit, epochs)
+    with warnings.catch_warnings(record=True):
+        # check classic fit + check manual picks
+        gat.picks = [0]
+        gat.fit(epochs)
+        # check optional y as array
+        gat.picks = None
+        gat.fit(epochs, y=epochs.events[:, 2])
+        # check optional y as list
+        gat.fit(epochs, y=epochs.events[:, 2].tolist())
+    assert_equal(len(gat.picks_), len(gat.ch_names), 1)
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), no "
+                 "prediction, no score>", '%s' % gat)
+    assert_equal(gat.ch_names, epochs.ch_names)
+    gat.predict(epochs)
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
+                 "predicted 14 epochs, no score>",
+                 "%s" % gat)
+    gat.score(epochs)
+    gat.score(epochs, y=epochs.events[:, 2])
+    gat.score(epochs, y=epochs.events[:, 2].tolist())
+    assert_equal("<GAT | fitted, start : -0.200 (s), stop : 0.499 (s), "
+                 "predicted 14 epochs,\n scored "
+                 "(accuracy_score)>", "%s" % gat)
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs, y=epochs.events[:, 2])
+
+    old_mode = gat.predict_mode
+    gat.predict_mode = 'super-foo-mode'
+    assert_raises(ValueError, gat.predict, epochs)
+    gat.predict_mode = old_mode
+
+    gat.score(epochs, y=epochs.events[:, 2])
+    assert_true("accuracy_score" in '%s' % gat.scorer_)
+    epochs2 = epochs.copy()
+
+    # check _DecodingTime class
+    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
+                 "0.047 (s), length: 0.047 (s), n_time_windows: 15>",
+                 "%s" % gat.train_times_)
+    assert_equal("<DecodingTime | start: -0.200 (s), stop: 0.499 (s), step: "
+                 "0.047 (s), length: 0.047 (s), n_time_windows: 15 x 15>",
+                 "%s" % gat.test_times_)
+
+    # the y-check
+    gat.predict_mode = 'mean-prediction'
+    epochs2.events[:, 2] += 10
+    gat_ = copy.deepcopy(gat)
+    assert_raises(ValueError, gat_.score, epochs2)
+    gat.predict_mode = 'cross-validation'
+
+    # Test basics
+    # --- number of trials
+    assert_true(gat.y_train_.shape[0] ==
+                gat.y_true_.shape[0] ==
+                len(gat.y_pred_[0][0]) == 14)
+    # ---  number of folds
+    assert_true(np.shape(gat.estimators_)[1] == gat.cv)
+    # ---  length training size
+    assert_true(len(gat.train_times_['slices']) == 15 ==
+                np.shape(gat.estimators_)[0])
+    # ---  length testing sizes
+    assert_true(len(gat.test_times_['slices']) == 15 ==
+                np.shape(gat.scores_)[0])
+    assert_true(len(gat.test_times_['slices'][0]) == 15 ==
+                np.shape(gat.scores_)[1])
+
+    # Test longer time window
+    gat = GeneralizationAcrossTime(train_times={'length': .100})
+    with warnings.catch_warnings(record=True):
+        gat2 = gat.fit(epochs)
+    assert_true(gat is gat2)  # return self
+    assert_true(hasattr(gat2, 'cv_'))
+    assert_true(gat2.cv_ != gat.cv)
+    scores = gat.score(epochs)
+    assert_true(isinstance(scores, list))  # type check
+    assert_equal(len(scores[0]), len(scores))  # shape check
+
+    assert_equal(len(gat.test_times_['slices'][0][0]), 2)
+    # Decim training steps
+    gat = GeneralizationAcrossTime(train_times={'step': .100})
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    gat.score(epochs)
+    assert_true(len(gat.scores_) == len(gat.estimators_) == 8)  # training time
+    assert_equal(len(gat.scores_[0]), 15)  # testing time
+
+    # Test start stop training & test cv without n_fold params
+    y_4classes = np.hstack((epochs.events[:7, 2], epochs.events[7:, 2] + 1))
+    gat = GeneralizationAcrossTime(cv=LeaveOneLabelOut(y_4classes),
+                                   train_times={'start': 0.090, 'stop': 0.250})
+    # predict without fit
+    assert_raises(RuntimeError, gat.predict, epochs)
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs, y=y_4classes)
+    gat.score(epochs)
+    assert_equal(len(gat.scores_), 4)
+    assert_equal(gat.train_times_['times'][0], epochs.times[6])
+    assert_equal(gat.train_times_['times'][-1], epochs.times[9])
+
+    # Test score without passing epochs & Test diagonal decoding
+    gat = GeneralizationAcrossTime(test_times='diagonal')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+    assert_raises(RuntimeError, gat.score)
+    gat.predict(epochs)
+    scores = gat.score()
+    assert_true(scores is gat.scores_)
+    assert_equal(np.shape(gat.scores_), (15, 1))
+    assert_array_equal([tim for ttime in gat.test_times_['times']
+                        for tim in ttime], gat.train_times_['times'])
+
+    # Test generalization across conditions
+    gat = GeneralizationAcrossTime(predict_mode='mean-prediction')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs[0:6])
+    gat.predict(epochs[7:])
+    gat.score(epochs[7:])
+
+    # Test training time parameters
+    gat_ = copy.deepcopy(gat)
+    # --- start stop outside time range
+    gat_.train_times = dict(start=-999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(start=999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+    # --- impossible slices
+    gat_.train_times = dict(step=.000001)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(length=.000001)
+    assert_raises(ValueError, gat_.fit, epochs)
+    gat_.train_times = dict(length=999.)
+    assert_raises(ValueError, gat_.fit, epochs)
+
+    # Test testing time parameters
+    # --- outside time range
+    gat.test_times = dict(start=-999.)
+    assert_raises(ValueError, gat.predict, epochs)
+    gat.test_times = dict(start=999.)
+    assert_raises(ValueError, gat.predict, epochs)
+    # --- impossible slices
+    gat.test_times = dict(step=.000001)
+    assert_raises(ValueError, gat.predict, epochs)
+    gat_ = copy.deepcopy(gat)
+    gat_.train_times_['length'] = .000001
+    gat_.test_times = dict(length=.000001)
+    assert_raises(ValueError, gat_.predict, epochs)
+    # --- test time region of interest
+    gat.test_times = dict(step=.150)
+    gat.predict(epochs)
+    assert_array_equal(np.shape(gat.y_pred_), (15, 5, 14, 1))
+    # --- silly value
+    gat.test_times = 'foo'
+    assert_raises(ValueError, gat.predict, epochs)
+    assert_raises(RuntimeError, gat.score)
+    # --- unmatched length between training and testing time
+    gat.test_times = dict(length=.150)
+    assert_raises(ValueError, gat.predict, epochs)
+
+    svc = SVC(C=1, kernel='linear', probability=True)
+    gat = GeneralizationAcrossTime(clf=svc, predict_mode='mean-prediction')
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    # sklearn needs it: cf.
+    # https://github.com/scikit-learn/scikit-learn/issues/2723
+    # and http://bit.ly/1u7t8UT
+    assert_raises(ValueError, gat.score, epochs2)
+    scores = gat.score(epochs)
+    scores = sum(scores, [])  # flatten over training times
+    assert_true(0.0 <= np.min(scores) <= 1.0)
+    assert_true(0.0 <= np.max(scores) <= 1.0)
+
+    # Test that an error is raised when training on one dataset, testing on
+    # another, and not specifying an appropriate cv:
+    gat = GeneralizationAcrossTime()
+    with warnings.catch_warnings(record=True):
+        gat.fit(epochs)
+
+    gat.predict(epochs)
+    assert_raises(ValueError, gat.predict, epochs[:10])
+
+    # Check that it still works with a classifier that outputs y_pred with
+    # shape = (n_trials, 1) instead of (n_trials,)
+    gat = GeneralizationAcrossTime(clf=RANSACRegressor(LinearRegression()),
+                                   cv=2)
+    epochs.crop(None, epochs.times[2])
+    gat.fit(epochs)
+    gat.predict(epochs)
+
+    # Test combinations of complex scenarios
+    # 2 or more distinct classes
+    n_classes = [2, 4]  # 4 tested
+    # nicely ordered labels or not
+    le = LabelEncoder()
+    y = le.fit_transform(epochs.events[:, 2])
+    y[len(y) // 2:] += 2
+    ys = (y, y + 1000)
+    # Univariate and multivariate prediction
+    svc = SVC(C=1, kernel='linear')
+
+    class SVC_proba(SVC):
+        def predict(self, x):
+            probas = super(SVC_proba, self).predict_proba(x)
+            return probas[:, 0]
+
+    svcp = SVC_proba(C=1, kernel='linear', probability=True)
+    clfs = [svc, svcp]
+    scorers = [None, mean_squared_error]
+    # Test all combinations
+    for clf, scorer in zip(clfs, scorers):
+        for y in ys:
+            for n_class in n_classes:
+                y_ = y % n_class
+                with warnings.catch_warnings(record=True):
+                    gat = GeneralizationAcrossTime(cv=2, clf=clf,
+                                                   scorer=scorer)
+                    gat.fit(epochs, y=y_)
+                    gat.score(epochs, y=y_)
+
+
+@requires_sklearn
+def test_decoding_time():
+    """Test TimeDecoding
+    """
+    epochs = make_epochs()
+    tg = TimeDecoding()
+    assert_equal("<TimeDecoding | no fit, no prediction, no score>", '%s' % tg)
+    assert_true(hasattr(tg, 'times'))
+    assert_true(not hasattr(tg, 'train_times'))
+    assert_true(not hasattr(tg, 'test_times'))
+    tg.fit(epochs)
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), no prediction, no score>", '%s' % tg)
+    assert_true(not hasattr(tg, 'train_times_'))
+    assert_true(not hasattr(tg, 'test_times_'))
+    assert_raises(RuntimeError, tg.score, epochs=None)
+    tg.predict(epochs)
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), predicted 14 epochs, no score>",
+                 '%s' % tg)
+    assert_array_equal(np.shape(tg.y_pred_), [15, 14, 1])
+    tg.score(epochs)
+    tg.score()
+    assert_array_equal(np.shape(tg.scores_), [15])
+    assert_equal("<TimeDecoding | fitted, start : -0.200 (s), stop : 0.499 "
+                 "(s), predicted 14 epochs,\n scored (accuracy_score)>",
+                 '%s' % tg)
 
-        epochs_list = [epochs[k] for k in event_id.keys()]
-        scores = time_generalization(epochs_list, cv=2, random_state=42)
-        n_times = len(epochs.times)
-        assert_true(scores.shape == (n_times, n_times))
-        assert_true(scores.max() <= 1.)
-        assert_true(scores.min() >= 0.)
+run_tests_if_main()
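
For orientation, the API these tests exercise boils down to a fit / predict /
score cycle. A minimal sketch (assuming ``epochs`` is an existing mne.Epochs
instance whose events code two classes):

    from mne.decoding import GeneralizationAcrossTime
    gat = GeneralizationAcrossTime(cv=5, predict_mode='cross-validation')
    gat.fit(epochs)       # one classifier per training time slice
    gat.predict(epochs)   # test each classifier at every testing time
    scores = gat.score()  # list of lists, (n_train_times, n_test_times)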
diff --git a/mne/decoding/tests/test_classifier.py b/mne/decoding/tests/test_transformer.py
similarity index 63%
rename from mne/decoding/tests/test_classifier.py
rename to mne/decoding/tests/test_transformer.py
index aaed3fb..87b862c 100644
--- a/mne/decoding/tests/test_classifier.py
+++ b/mne/decoding/tests/test_transformer.py
@@ -1,4 +1,5 @@
 # Author: Mainak Jas <mainak at neuro.hut.fi>
+#         Romain Trachel <trachelr at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -10,8 +11,8 @@ from nose.tools import assert_true, assert_raises
 from numpy.testing import assert_array_equal
 
 from mne import io, read_events, Epochs, pick_types
-from mne.decoding.classifier import Scaler, FilterEstimator
-from mne.decoding.classifier import PSDEstimator, ConcatenateChannels
+from mne.decoding import Scaler, FilterEstimator
+from mne.decoding import PSDEstimator, EpochsVectorizer
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -30,7 +31,7 @@ def test_scaler():
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
 
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -47,6 +48,11 @@ def test_scaler():
 
     assert_array_equal(X2, X)
 
+    # Test inverse_transform
+    with warnings.catch_warnings(record=True):  # invalid value in multiply
+        Xi = scaler.inverse_transform(X, y)
+    assert_array_equal(epochs_data, Xi)
+
     # Test init exception
     assert_raises(ValueError, scaler.fit, epochs, y)
     assert_raises(ValueError, scaler.transform, epochs, y)
@@ -58,18 +64,34 @@ def test_filterestimator():
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
     epochs_data = epochs.get_data()
-    filt = FilterEstimator(epochs.info, 1, 40)
+
+    # Test different combinations of l_freq and h_freq
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=40)
     y = epochs.events[:, -1]
     with warnings.catch_warnings(record=True):  # stop freq attenuation warning
         X = filt.fit_transform(epochs_data, y)
         assert_true(X.shape == epochs_data.shape)
         assert_array_equal(filt.fit(epochs_data, y).transform(epochs_data), X)
 
+    filt = FilterEstimator(epochs.info, l_freq=0, h_freq=40)
+    y = epochs.events[:, -1]
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        X = filt.fit_transform(epochs_data, y)
+
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=1)
+    y = epochs.events[:, -1]
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        assert_raises(ValueError, filt.fit_transform, epochs_data, y)
+
+    filt = FilterEstimator(epochs.info, l_freq=1, h_freq=None)
+    with warnings.catch_warnings(record=True):  # stop freq attenuation warning
+        X = filt.fit_transform(epochs_data, y)
+
     # Test init exception
     assert_raises(ValueError, filt.fit, epochs, y)
     assert_raises(ValueError, filt.transform, epochs, y)
@@ -81,7 +103,7 @@ def test_psdestimator():
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
@@ -98,32 +120,43 @@ def test_psdestimator():
     assert_raises(ValueError, psd.transform, epochs, y)
 
 
-def test_concatenatechannels():
-    """Test methods of ConcatenateChannels
+def test_epochs_vectorizer():
+    """Test methods of EpochsVectorizer
     """
     raw = io.Raw(raw_fname, preload=False)
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
-                            eog=False, exclude='bads')
+                       eog=False, exclude='bads')
     picks = picks[1:13:3]
-    with warnings.catch_warnings(record=True) as w:
+    with warnings.catch_warnings(record=True):
         epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), preload=True)
     epochs_data = epochs.get_data()
-    concat = ConcatenateChannels(epochs.info)
+    vector = EpochsVectorizer(epochs.info)
     y = epochs.events[:, -1]
-    X = concat.fit_transform(epochs_data, y)
+    X = vector.fit_transform(epochs_data, y)
 
     # Check data dimensions
     assert_true(X.shape[0] == epochs_data.shape[0])
     assert_true(X.shape[1] == epochs_data.shape[1] * epochs_data.shape[2])
 
-    assert_array_equal(concat.fit(epochs_data, y).transform(epochs_data), X)
+    assert_array_equal(vector.fit(epochs_data, y).transform(epochs_data), X)
 
     # Check if data is preserved
     n_times = epochs_data.shape[2]
     assert_array_equal(epochs_data[0, 0, 0:n_times], X[0, 0:n_times])
 
+    # Check inverse transform
+    Xi = vector.inverse_transform(X, y)
+    assert_true(Xi.shape[0] == epochs_data.shape[0])
+    assert_true(Xi.shape[1] == epochs_data.shape[1])
+    assert_array_equal(epochs_data[0, 0, 0:n_times], Xi[0, 0, 0:n_times])
+
+    # Check that inverse_transform works with a different number of epochs
+    Xi = vector.inverse_transform(epochs_data[0], y)
+    assert_true(Xi.shape[1] == epochs_data.shape[1])
+    assert_true(Xi.shape[2] == epochs_data.shape[2])
+
     # Test init exception
-    assert_raises(ValueError, concat.fit, epochs, y)
-    assert_raises(ValueError, concat.transform, epochs, y)
+    assert_raises(ValueError, vector.fit, epochs, y)
+    assert_raises(ValueError, vector.transform, epochs, y)
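
The renamed EpochsVectorizer is essentially a reshaping transformer; the round
trip checked above reduces to the following sketch (reusing the ``epochs`` and
``y`` constructed in the test):

    from mne.decoding import EpochsVectorizer
    vec = EpochsVectorizer(epochs.info)
    X = vec.fit_transform(epochs.get_data(), y)  # (n_epochs, n_chans * n_times)
    data = vec.inverse_transform(X, y)           # (n_epochs, n_chans, n_times)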
diff --git a/mne/decoding/time_gen.py b/mne/decoding/time_gen.py
index c17c74d..5431653 100644
--- a/mne/decoding/time_gen.py
+++ b/mne/decoding/time_gen.py
@@ -1,123 +1,1287 @@
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# Authors: Jean-Remi King <jeanremi.king at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Clement Moutard <clement.moutard at gmail.com>
 #
 # License: BSD (3-clause)
 
 import numpy as np
+import copy
 
-from ..utils import logger, verbose
-from ..parallel import parallel_func
-from ..io.pick import channel_type, pick_types
+from ..io.pick import pick_types
+from ..viz.decoding import plot_gat_matrix, plot_gat_times
+from ..parallel import parallel_func, check_n_jobs
 
 
-def _time_gen_one_fold(clf, X, y, train, test, scoring):
-    """Aux function of time_generalization"""
-    from sklearn.metrics import SCORERS
-    n_times = X.shape[2]
-    scores = np.zeros((n_times, n_times))
-    scorer = SCORERS[scoring]
+class _DecodingTime(dict):
+    """A dictionary to configure the training times that has the following keys:
 
-    for t_train in range(n_times):
-        X_train = X[train, :, t_train]
-        clf.fit(X_train, y[train])
-        for t_test in range(n_times):
-            X_test = X[test, :, t_test]
-            scores[t_test, t_train] += scorer(clf, X_test, y[test])
+    'slices' : ndarray, shape (n_clfs,)
+        Array of time slices (in indices) used for each classifier.
+        If not given, computed from 'start', 'stop', 'length', 'step'.
+    'start' : float
+        Time at which to start decoding (in seconds).
+        Defaults to min(epochs.times).
+    'stop' : float
+        Maximal time at which to stop decoding (in seconds).
+        Defaults to max(times).
+    'step' : float
+        Duration separating the start of subsequent classifiers (in
+        seconds). Defaults to one time sample.
+    'length' : float
+        Duration of each classifier (in seconds). Defaults to one time sample.
+    If None, empty dict. """
 
-    return scores
+    def __repr__(self):
+        s = ""
+        if "start" in self:
+            s += "start: %0.3f (s)" % (self["start"])
+        if "stop" in self:
+            s += ", stop: %0.3f (s)" % (self["stop"])
+        if "step" in self:
+            s += ", step: %0.3f (s)" % (self["step"])
+        if "length" in self:
+            s += ", length: %0.3f (s)" % (self["length"])
+        if "slices" in self:
+            # identify depth: training times only contains n_time but
+            # testing_times can contain n_times or n_times * m_times
+            depth = [len(ii) for ii in self["slices"]]
+            if len(np.unique(depth)) == 1:  # if all slices have same depth
+                if depth[0] == 1:  # if depth is one
+                    s += ", n_time_windows: %s" % (len(depth))
+                else:
+                    s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
+            else:
+                s += (", n_time_windows: %s x [%s, %s]" %
+                      (len(depth),
+                       min([len(ii) for ii in depth]),
+                       max(([len(ii) for ii in depth]))))
+        return "<DecodingTime | %s>" % s
 
 
-@verbose
-def time_generalization(epochs_list, clf=None, cv=5, scoring="roc_auc",
-                        shuffle=True, random_state=None, n_jobs=1,
-                        verbose=None):
-    """Fit decoder at each time instant and test at all others
+class _GeneralizationAcrossTime(object):
+    """ see GeneralizationAcrossTime
+    """  # noqa
+    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
+                 test_times=None, predict_mode='cross-validation', scorer=None,
+                 n_jobs=1):
 
-    The function returns the cross-validation scores when the train set
-    is from one time instant and the test from all others.
+        from sklearn.preprocessing import StandardScaler
+        from sklearn.linear_model import LogisticRegression
+        from sklearn.pipeline import Pipeline
 
-    The decoding will be done using all available data channels, but
-    will only work if 1 type of channel is availalble. For example
-    epochs should contain only gradiometers.
+        # Store parameters in object
+        self.cv = cv
+        # Define training sliding window
+        self.train_times = (_DecodingTime() if train_times is None
+                            else _DecodingTime(train_times))
+        # Define testing sliding window. If None, will be set in predict()
+        if test_times is None:
+            self.test_times = _DecodingTime()
+        elif test_times == 'diagonal':
+            self.test_times = 'diagonal'
+        else:
+            self.test_times = _DecodingTime(test_times)
+
+        # Default classification pipeline
+        if clf is None:
+            scaler = StandardScaler()
+            estimator = LogisticRegression()
+            clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
+        self.clf = clf
+        self.predict_mode = predict_mode
+        self.scorer = scorer
+        self.picks = picks
+        self.n_jobs = n_jobs
+
+    def fit(self, epochs, y=None):
+        """ Train a classifier on each specified time slice.
+
+        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
+        ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs.
+        y : list or ndarray of int, shape (n_samples,) or None, optional
+            To-be-fitted model values. If None, y = epochs.events[:, 2].
+
+        Returns
+        -------
+        self : GeneralizationAcrossTime
+            Returns fitted GeneralizationAcrossTime object.
+
+        Notes
+        -----
+        If X and y are not C-ordered and contiguous arrays of np.float64 and
+        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
+
+        If X is a dense array, then the other methods will not support sparse
+        matrices as input.
+        """
+        from sklearn.base import clone
+        from sklearn.cross_validation import check_cv, StratifiedKFold
+
+        # clean attributes
+        for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
+                    'estimators_', 'test_times_', 'y_pred_', 'y_true_',
+                    'scores_', 'scorer_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        n_jobs = self.n_jobs
+        # Extract data from MNE structure
+        X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
+        self.ch_names = [epochs.ch_names[p] for p in self.picks_]
+
+        cv = self.cv
+        if isinstance(cv, (int, np.int)):
+            cv = StratifiedKFold(y, cv)
+        cv = check_cv(cv, X, y, classifier=True)
+        self.cv_ = cv  # update CV
+
+        self.y_train_ = y
+
+        # Cross validation scheme
+        # XXX Cross validation should later be transformed into a make_cv, and
+        # defined in __init__
+        self.train_times_ = copy.deepcopy(self.train_times)
+        if 'slices' not in self.train_times_:
+            self.train_times_ = _sliding_window(epochs.times, self.train_times)
+
+        # Parallel across training time
+        # TODO: JRK: Chunking times points needs to be simplified
+        parallel, p_time_gen, n_jobs = parallel_func(_fit_slices, n_jobs)
+        n_chunks = min(len(self.train_times_['slices']), n_jobs)
+        splits = np.array_split(self.train_times_['slices'], n_chunks)
+
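+        # The helper below collapses a chunk of training slices into the
+        # unique, sorted set of time indices it covers, so that only the
+        # samples of X a worker actually needs are shipped to it.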
+        def f(x):
+            return np.unique(np.concatenate(x))
+
+        out = parallel(p_time_gen(clone(self.clf),
+                                  X[..., f(train_slices_chunk)],
+                                  y, train_slices_chunk, cv)
+                       for train_slices_chunk in splits)
+        # Unpack estimators into time slices X folds list of lists.
+        self.estimators_ = sum(out, list())
+        return self
+
+    def predict(self, epochs):
+        """ Test each classifier on each specified testing time slice.
+
+        .. note:: This function sets the ``y_pred_`` and ``test_times_``
+                  attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs. Can be similar to fitted epochs or not. See
+            predict_mode parameter.
+
+        Returns
+        -------
+        y_pred : list of lists of arrays of floats, shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
+            The single-trial predictions at each training time and each testing
+            time. Note that the number of testing times per training time need
+            not be regular; when it is,
+            ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
+        """  # noqa
+
+        # Check that at least one classifier has been trained
+        if not hasattr(self, 'estimators_'):
+            raise RuntimeError('Please fit models before trying to predict')
+
+        # clean attributes
+        for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        n_jobs = self.n_jobs
+
+        X, y, _ = _check_epochs_input(epochs, None, self.picks_)
+
+        # Define testing sliding window
+        if self.test_times == 'diagonal':
+            test_times = _DecodingTime()
+            test_times['slices'] = [[s] for s in self.train_times_['slices']]
+            test_times['times'] = [[s] for s in self.train_times_['times']]
+        elif isinstance(self.test_times, dict):
+            test_times = copy.deepcopy(self.test_times)
+        else:
+            raise ValueError('`test_times` must be a dict or "diagonal"')
+
+        if 'slices' not in test_times:
+            # Check that the testing window has the same number of time
+            # samples as the training window (otherwise the number of
+            # features would differ between fit and predict)
+            if 'length' not in test_times:
+                test_times['length'] = self.train_times_['length']
+            if test_times['length'] != self.train_times_['length']:
+                raise ValueError('`train_times` and `test_times` must have '
+                                 'identical `length` keys')
+            # Make a sliding window for each training time.
+            slices_list = list()
+            times_list = list()
+            for t in range(0, len(self.train_times_['slices'])):
+                test_times_ = _sliding_window(epochs.times, test_times)
+                times_list += [test_times_['times']]
+                slices_list += [test_times_['slices']]
+            test_times = test_times_
+            test_times['slices'] = slices_list
+            test_times['times'] = times_list
+
+        # Store all testing times parameters
+        self.test_times_ = test_times
+
+        # Prepare parallel predictions across time points
+        # FIXME Note that this means that TimeDecoding.predict isn't parallel
+        parallel, p_time_gen, n_jobs = parallel_func(_predict_slices, n_jobs)
+        n_test_slice = max([len(sl) for sl in self.train_times_['slices']])
+        # Loop across estimators (i.e. training times)
+        n_chunks = min(n_test_slice, n_jobs)
+        splits = [np.array_split(slices, n_chunks)
+                  for slices in self.test_times_['slices']]
+        splits = map(list, zip(*splits))
+
+        def chunk_X(X, slices):
+            """Smart chunking to avoid memory overload"""
+            # from object array to list
+            slices = [sl for sl in slices if len(sl)]
+            start = np.min(slices)
+            stop = np.max(slices) + 1
+            slices_ = np.array(slices) - start
+            X_ = X[:, :, start:stop]
+            return (X_, self.estimators_, self.cv_, slices_.tolist(),
+                    self.predict_mode)
+
+        y_pred = parallel(p_time_gen(*chunk_X(X, slices))
+                          for slices in splits)
+
+        # concatenate chunks across test time dimension. Don't use
+        # np.concatenate as this would need new memory allocations
+        self.y_pred_ = [[test for chunk in train for test in chunk]
+                        for train in map(list, zip(*y_pred))]
+        return self.y_pred_
+
+    def score(self, epochs=None, y=None):
+        """Score Epochs
+
+        Estimate scores across trials by comparing the prediction estimated for
+        each trial to its true value.
+
+        Calls ``predict()`` if it has not already been done.
+
+        Note. The function updates the ``scorer_``, ``scores_``, and
+        ``y_true_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs | None, optional
+            The epochs. Can be similar to fitted epochs or not.
+            If None, it needs to rely on the predictions ``y_pred_``
+            generated with ``predict()``.
+        y : list | ndarray, shape (n_epochs,) | None, optional
+            True values to be compared with the predictions ``y_pred_``
+            generated with ``predict()`` via ``scorer_``.
+            If None and ``predict_mode == 'cross-validation'``, defaults
+            to ``y_train_``.
+
+        Returns
+        -------
+        scores : list of lists of float
+            The scores estimated by ``scorer_`` at each training time and each
+            testing time (e.g. mean accuracy of ``predict(X)``). Note that the
+            number of testing times per training time need not be regular;
+            when it is, ``np.shape(scores) = (n_train_time, n_test_time)``.
+        """
+        from sklearn.metrics import accuracy_score
+
+        # Run predictions if not already done
+        if epochs is not None:
+            self.predict(epochs)
+        else:
+            if not hasattr(self, 'y_pred_'):
+                raise RuntimeError('Please predict() epochs first or pass '
+                                   'epochs to score()')
+
+        # clean gat.score() attributes
+        for att in ['scores_', 'scorer_', 'y_true_']:
+            if hasattr(self, att):
+                delattr(self, att)
+
+        # Check scorer
+        # XXX Need API to identify proper scorer from the clf
+        self.scorer_ = accuracy_score if self.scorer is None else self.scorer
+
+        # If y is not passed explicitly, use the default epochs events
+        if y is None:
+            if self.predict_mode == 'cross-validation':
+                y = self.y_train_
+            else:
+                if epochs is not None:
+                    y = epochs.events[:, 2]
+                else:
+                    raise RuntimeError('y is undefined because '
+                                       'predict_mode="mean-prediction" and '
+                                       'epochs are missing. You need to '
+                                       'explicitly specify y.')
+            if not np.all(np.unique(y) == np.unique(self.y_train_)):
+                raise ValueError('Classes (y) passed differ from classes used '
+                                 'for training. Please explicitly pass your y '
+                                 'for scoring.')
+        elif isinstance(y, list):
+            y = np.array(y)
+        self.y_true_ = y  # to be compared with y_pred for scoring
+
+        # Preprocessing for parallelization
+        n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
+        parallel, p_time_gen, n_jobs = parallel_func(_score_slices, n_jobs)
+        n_estimators = len(self.train_times_['slices'])
+        n_chunks = min(n_estimators, n_jobs)
+        splits = np.array_split(range(len(self.train_times_['slices'])),
+                                n_chunks)
+        scores = parallel(
+            p_time_gen(self.y_true_,
+                       [self.y_pred_[train] for train in split],
+                       self.scorer_)
+            for split in splits)
+
+        self.scores_ = [score for chunk in scores for score in chunk]
+        return self.scores_
+
+
+def _predict_slices(X, estimators, cv, slices, predict_mode):
+    """Aux function of GeneralizationAcrossTime that loops across chunks of
+    testing slices.
+    """
+    out = list()
+    for this_estimator, this_slice in zip(estimators, slices):
+        out.append(_predict_time_loop(X, this_estimator, cv, this_slice,
+                                      predict_mode))
+    return out
+
+
+def _predict_time_loop(X, estimators, cv, slices, predict_mode):
+    """Aux function of GeneralizationAcrossTime
+
+    Run classifiers predictions loop across time samples.
 
     Parameters
     ----------
-    epochs_list : list of Epochs
-        The epochs in all the conditions.
-    clf : object | None
-        A object following scikit-learn estimator API (fit & predict).
-        If None the classifier will be a linear SVM (C=1.) after
-        feature standardization.
-    cv : integer or cross-validation generator, optional
-        If an integer is passed, it is the number of fold (default 5).
-        Specific cross-validation objects can be passed, see
-        sklearn.cross_validation module for the list of possible objects.
-    scoring : {string, callable, None}, optional, default: "roc_auc"
-        A string (see model evaluation documentation in scikit-learn) or
-        a scorer callable object / function with signature
-        ``scorer(estimator, X, y)``.
-    shuffle : bool
-        If True, shuffle the epochs before splitting them in folds.
-    random_state : None | int
-        The random state used to shuffle the epochs. Ignored if
-        shuffle is False.
-    n_jobs : int
-        Number of jobs to run in parallel. Each fold is fit
-        in parallel.
+    X : ndarray, shape (n_epochs, n_features, n_times)
+        To-be-fitted data.
+    estimators : array-like, shape (n_times, n_folds)
+        Array of scikit-learn classifiers fitted in cross-validation.
+    slices : list
+        List of slices selecting the data in X from which the predictions
+        are generated.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are achieved with regard to the cross-
+        validation procedure:
+            'cross-validation' : estimates a single prediction per sample based
+                on the unique independent classifier fitted in the cross-
+                validation.
+            'mean-prediction' : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+        Default: 'cross-validation'
+    """
+    n_epochs = len(X)
+    # Loop across testing slices
+    y_pred = [list() for _ in range(len(slices))]
+
+    # XXX EHN: This loop should be parallelized in a similar way to fit()
+    for t, indices in enumerate(slices):
+        # Flatten features in case of multiple time samples
+        Xtrain = X[:, :, indices].reshape(
+            n_epochs, np.prod(X[:, :, indices].shape[1:]))
+
+        # Single trial predictions
+        if predict_mode == 'cross-validation':
+            # If predict within cross validation, only predict with
+            # corresponding classifier, else predict with each fold's
+            # classifier and average prediction.
+
+            # Check that training cv and predicting cv match
+            if (len(estimators) != len(cv)) or (cv.n != Xtrain.shape[0]):
+                raise ValueError(
+                    'When `predict_mode = "cross-validation"`, the training '
+                    'and predicting cv schemes must be identical.')
+            for k, (train, test) in enumerate(cv):
+                # XXX This array cannot easily be initialized up front, as
+                # its size depends on the type of predictor and the number
+                # of classes.
+                if k == 0:
+                    y_pred_ = _predict(Xtrain[test, :], estimators[k:k + 1])
+                    y_pred[t] = np.empty((n_epochs, y_pred_.shape[1]))
+                    y_pred[t][test, :] = y_pred_
+                y_pred[t][test, :] = _predict(Xtrain[test, :],
+                                              estimators[k:k + 1])
+        elif predict_mode == 'mean-prediction':
+            y_pred[t] = _predict(Xtrain, estimators)
+        else:
+            raise ValueError('`predict_mode` must be a str, "mean-prediction"'
+                             ' or "cross-validation"')
+    return y_pred
+
+
+def _score_slices(y_true, list_y_pred, scorer):
+    """Aux function of GeneralizationAcrossTime that loops across chunks of
+    testing slices.
+    """
+    scores_list = list()
+    for y_pred in list_y_pred:
+        scores = list()
+        for t, this_y_pred in enumerate(y_pred):
+            # Scores across trials
+            scores.append(scorer(y_true, np.array(this_y_pred)))
+        scores_list.append(scores)
+    return scores_list
+
+
+def _check_epochs_input(epochs, y, picks=None):
+    """Aux function of GeneralizationAcrossTime
+
+    Format MNE data into scikit-learn X and y
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+            The epochs.
+    y : ndarray, shape (n_epochs,) | list, shape (n_epochs,) | None
+        To-be-fitted model. If None, y = epochs.events[:, 2].
+    picks : array-like of int | None
+        The channel indices to include. If None, the data
+        channels in info, except bad channels, are used.
+
+    Returns
+    -------
+    X : ndarray, shape (n_epochs, n_selected_chans, n_times)
+        To-be-fitted data.
+    y : ndarray, shape (n_epochs,)
+        To-be-fitted model.
+    picks : array-like of int
+        The channel indices actually selected (the good data channels when
+        the input ``picks`` is None).
+    """
+    if y is None:
+        y = epochs.events[:, 2]
+    elif isinstance(y, list):
+        y = np.array(y)
+
+    # Convert MNE data into trials x features x time matrix
+    X = epochs.get_data()
+
+    # Pick channels
+    if picks is None:  # just use good data channels
+        picks = pick_types(epochs.info, meg=True, eeg=True, seeg=True,
+                           eog=False, ecg=False, misc=False, stim=False,
+                           ref_meg=False, exclude='bads')
+    if isinstance(picks, (list, np.ndarray)):
+        picks = np.array(picks, dtype=np.int)
+    else:
+        raise ValueError('picks must be a list or a numpy.ndarray of int')
+    X = X[:, picks, :]
+
+    # Check data sets
+    assert X.shape[0] == y.shape[0]
+    return X, y, picks
+
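+# Note: _check_epochs_input is the single path through which fit() and
+# predict() turn an Epochs object into the (X, y, picks) triplet, so the same
+# channel selection and the y = epochs.events[:, 2] default apply to both.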
+
+def _fit_slices(clf, x_chunk, y, slices, cv):
+    """Aux function of GeneralizationAcrossTime
+
+    Fit each classifier.
+
+    Parameters
+    ----------
+    clf : scikit-learn classifier
+        The classifier object.
+    x_chunk : ndarray, shape (n_epochs, n_features, n_times)
+        To-be-fitted data.
+    y : list | array, shape (n_epochs,)
+        To-be-fitted model.
+    slices : list | array, shape (n_training_slice,)
+        List of training slices, indicating time sample relative to X
+    cv : scikit-learn cross-validation generator
+        A cross-validation generator to use.
+
+    Returns
+    -------
+    estimators : list of lists of estimators
+        List of fitted scikit-learn classifiers corresponding to each training
+        slice.
+    """
+    from sklearn.base import clone
+    # Initialize
+    n_epochs = len(x_chunk)
+    estimators = list()
+    # Identify the time samples of x_chunk corresponding to X
+    values = np.unique(np.concatenate(slices))
+    indices = range(len(values))
+    # Loop across time slices
+    for t_slice in slices:
+        # Translate absolute time samples into time sample relative to x_chunk
+        for ii in indices:
+            t_slice[t_slice == values[ii]] = indices[ii]
+        # Select slice
+        X = x_chunk[..., t_slice]
+        # Reshape data matrix to flatten features in case of multiple time
+        # samples.
+        X = X.reshape(n_epochs, np.prod(X.shape[1:]))
+        # Loop across folds
+        estimators_ = list()
+        for fold, (train, test) in enumerate(cv):
+            # Fit classifier
+            clf_ = clone(clf)
+            clf_.fit(X[train, :], y[train])
+            estimators_.append(clf_)
+        # Store classifier
+        estimators.append(estimators_)
+    return estimators
+
+
+def _sliding_window(times, window_params):
+    """Aux function of GeneralizationAcrossTime
+
+    Define the slices on which to train each classifier.
+
+    Parameters
+    ----------
+    times : ndarray, shape (n_times,)
+        Array of times from MNE epochs.
+    window_params : dict keys: ('start', 'stop', 'step', 'length')
+        Either train or test times. See GAT documentation.
+
+    Returns
+    -------
+    time_pick : list
+        List of training slices, indicating for each classifier the time
+        sample (in indices of times) to be fitted on.
+    """
+
+    window_params = _DecodingTime(window_params)
+
+    # Duration of a single time sample (in seconds)
+    freq = (times[-1] - times[0]) / len(times)
+
+    # Default values
+    if ('slices' in window_params and
+            all(k in window_params for k in
+                ('start', 'stop', 'step', 'length'))):
+        time_pick = window_params['slices']
+    else:
+        if 'start' not in window_params:
+            window_params['start'] = times[0]
+        if 'stop' not in window_params:
+            window_params['stop'] = times[-1]
+        if 'step' not in window_params:
+            window_params['step'] = freq
+        if 'length' not in window_params:
+            window_params['length'] = freq
+
+        if (window_params['start'] < times[0] or
+                window_params['start'] > times[-1]):
+            raise ValueError(
+                '`start` (%.2f s) outside time range [%.2f, %.2f].' % (
+                    window_params['start'], times[0], times[-1]))
+        if (window_params['stop'] < times[0] or
+                window_params['stop'] > times[-1]):
+            raise ValueError(
+                '`stop` (%.2f s) outside time range [%.2f, %.2f].' % (
+                    window_params['stop'], times[0], times[-1]))
+        if window_params['step'] < freq:
+            raise ValueError('`step` must be >= 1 / sampling_frequency')
+        if window_params['length'] < freq:
+            raise ValueError('`length` must be >= 1 / sampling_frequency')
+        if window_params['length'] > np.ptp(times):
+            raise ValueError('`length` must be <= time range')
+
+        # Convert seconds to index
+
+        def find_time_idx(t):  # find closest time point
+            return np.argmin(np.abs(np.asarray(times) - t))
 
+        start = find_time_idx(window_params['start'])
+        stop = find_time_idx(window_params['stop'])
+        step = int(round(window_params['step'] / freq))
+        length = int(round(window_params['length'] / freq))
+
+        # For each training slice, give time samples to be included
+        time_pick = [range(start, start + length)]
+        while (time_pick[-1][0] + step) <= (stop - length + 1):
+            start = time_pick[-1][0] + step
+            time_pick.append(range(start, start + length))
+        window_params['slices'] = time_pick
+
+    # Keep the time (in seconds) of the last sample of each window
+    t_inds_ = [t[-1] for t in window_params['slices']]
+    window_params['times'] = times[t_inds_]
+
+    return window_params
+
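+# Illustrative note (not executed): with epochs sampled every 1 ms, a spec of
+# dict(start=0., stop=0.5, step=0.010, length=0.010) yields ~10-sample
+# windows whose starts are 10 samples apart, since 'step' and 'length' are
+# divided by the per-sample duration computed in _sliding_window.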
+
+def _predict(X, estimators):
+    """Aux function of GeneralizationAcrossTime
+
+    Run the prediction of each classifier. If multiple classifiers are
+    passed, average their predictions to yield a single prediction per
+    sample.
+
+    Parameters
+    ----------
+    X : ndarray, shape (n_epochs, n_features)
+        To-be-predicted data, with the (channel, time) features flattened.
+    estimators : ndarray, shape (n_folds,) | shape (1,)
+        Array of scikit-learn classifiers used to predict the data.
+
+    Returns
     -------
-    scores : array, shape (n_times, n_times)
-        The scores averaged across folds. scores[i, j] contains
-        the generalization score when learning at time j and testing
-        at time i. The diagonal is the cross-validation score
-        at each time-independant instant.
+    y_pred : ndarray, shape (n_epochs, m_prediction_dimensions)
+        Classifier's prediction for each trial.
+    """
+    from scipy import stats
+    from sklearn.base import is_classifier
+    # Initialize results:
+    n_epochs = X.shape[0]
+    n_clf = len(estimators)
+
+    # Compute prediction for each sub-estimator (i.e. per fold)
+    # if independent, estimators = all folds
+    for fold, clf in enumerate(estimators):
+        _y_pred = clf.predict(X)
+        # See inconsistency in dimensionality: scikit-learn/scikit-learn#5058
+        if _y_pred.ndim == 1:
+            _y_pred = _y_pred[:, None]
+        # initialize predict_results array
+        if fold == 0:
+            predict_size = _y_pred.shape[1]
+            y_pred = np.ones((n_epochs, predict_size, n_clf))
+        y_pred[:, :, fold] = _y_pred
+
+    # Collapse y_pred across folds if necessary (i.e. if independent)
+    if fold > 0:
+        # XXX need API to identify how multiple predictions can be combined?
+        if is_classifier(clf):
+            y_pred, _ = stats.mode(y_pred, axis=2)
+        else:
+            y_pred = np.mean(y_pred, axis=2)
+
+    # Format shape
+    y_pred = y_pred.reshape((n_epochs, predict_size))
+    return y_pred
+
+
+class GeneralizationAcrossTime(_GeneralizationAcrossTime):
+    """Generalize across time and conditions
+
+    Creates an estimator object used to 1) fit a series of classifiers on
+    multidimensional time-resolved data, and 2) test the ability of each
+    classifier to generalize across other time samples.
+
+    Parameters
+    ----------
+    picks : array-like of int | None
+        The channel indices to include. If None, the data
+        channels in info, except bad channels, are used.
+    cv : int | object
+        If an integer is passed, it is the number of folds.
+        Specific cross-validation objects can be passed, see
+        scikit-learn.cross_validation module for the list of possible objects.
+        Defaults to 5.
+    clf : object | None
+        An estimator compliant with the scikit-learn API (fit & predict).
+        If None the classifier will be a standard pipeline including
+        StandardScaler and LogisticRegression with default parameters.
+    train_times : dict | None
+        A dictionary to configure the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``start`` : float
+                Time at which to start decoding (in seconds).
+                Defaults to min(epochs.times).
+            ``stop`` : float
+                Maximal time at which to stop decoding (in seconds).
+                Defaults to max(times).
+            ``step`` : float
+                Duration separating the start of subsequent classifiers (in
+                seconds). Defaults to one time sample.
+            ``length`` : float
+                Duration of each classifier (in seconds).
+                Defaults to one time sample.
+
+        If None, empty dict.
+    test_times : 'diagonal' | dict | None, optional
+        Configures the testing times.
+        If set to 'diagonal', predictions are made at the time at which
+        each classifier is trained.
+        If set to None, predictions are made at all time points.
+        If set to dict, the dict should contain ``slices`` or be constructed
+        in a similar way to train_times::
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+
+        If None, empty dict.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are achieved with regard to the cross-
+        validation procedure:
+
+            ``cross-validation`` : estimates a single prediction per sample
+                based on the unique independent classifier fitted in the
+                cross-validation.
+            ``mean-prediction`` : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+
+        Default: 'cross-validation'
+    scorer : object | None
+        scikit-learn Scorer instance. If None, set to accuracy_score.
+    n_jobs : int
+        Number of jobs to run in parallel. Defaults to 1.
+
+    Attributes
+    ----------
+    picks_ : array-like of int | None
+        The channel indices to include.
+    ch_names : list, array-like, shape (n_channels,)
+        Names of the channels used for training.
+    y_train_ : list | ndarray, shape (n_samples,)
+        The categories used for training.
+    train_times_ : dict
+        A dictionary that configures the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``times`` : ndarray, shape (n_clfs,)
+                The training times (in seconds).
+
+    test_times_ : dict
+        A dictionary that configures the testing times for each training time:
+
+            ``slices`` : ndarray, shape (n_clfs, n_testing_times)
+                Array of time slices (in indices) used for each classifier.
+            ``times`` : ndarray, shape (n_clfs, n_testing_times)
+                The testing times (in seconds) for each training time.
+
+    cv_ : CrossValidation object
+        The actual CrossValidation input depending on y.
+    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
+        The estimators for each time point and each fold.
+    y_pred_ : list of lists of arrays of floats, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
+        The single-trial predictions estimated by self.predict() at each
+        training time and each testing time. Note that the number of testing
+        times per training time need not be regular; when it is,
+        ``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
+    y_true_ : list | ndarray, shape (n_samples,)
+        The categories used for scoring ``y_pred_``.
+    scorer_ : object
+        scikit-learn Scorer instance.
+    scores_ : list of lists of float
+        The scores estimated by ``self.scorer_`` at each training time and each
+        testing time (e.g. mean accuracy of self.predict(X)). Note that the
+        number of testing times per training time need not be regular;
+        else, ``np.shape(scores) = (n_train_time, n_test_time)``.
+
+    See Also
+    --------
+    TimeDecoding
 
     Notes
     -----
     The function implements the method used in:
 
-    Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
-    and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
-    unexpected sounds", PLOS ONE, 2013
+        Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
+        and Stanislas Dehaene, "Two distinct dynamic modes subtend the
+        detection of unexpected sounds", PLoS ONE, 2014
+        DOI: 10.1371/journal.pone.0085791
+
+    .. versionadded:: 0.9.0
+    """  # noqa
+    def __init__(self, picks=None, cv=5, clf=None, train_times=None,
+                 test_times=None, predict_mode='cross-validation', scorer=None,
+                 n_jobs=1):
+        super(GeneralizationAcrossTime, self).__init__(
+            picks=picks, cv=cv, clf=clf, train_times=train_times,
+            test_times=test_times, predict_mode=predict_mode, scorer=scorer,
+            n_jobs=n_jobs)
+
+    def __repr__(self):
+        s = ''
+        if hasattr(self, "estimators_"):
+            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
+                self.train_times_['start'], self.train_times_['stop'])
+        else:
+            s += 'no fit'
+        if hasattr(self, 'y_pred_'):
+            s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
+        else:
+            s += ", no prediction"
+        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
+            s += ',\n '
+        else:
+            s += ', '
+        if hasattr(self, 'scores_'):
+            s += "scored"
+            if callable(self.scorer_):
+                s += " (%s)" % (self.scorer_.__name__)
+        else:
+            s += "no score"
+
+        return "<GAT | %s>" % s
+
+    def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
+             cmap='RdBu_r', show=True, colorbar=True,
+             xlabel=True, ylabel=True):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot the score of each classifier at each tested time window.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        vmin : float | None
+            Min color value for scores. If None, sets to min(``gat.scores_``).
+        vmax : float | None
+            Max color value for scores. If None, sets to max(``gat.scores_``).
+        tlim : ndarray, (train_min, test_max) | None
+            The time limits used for plotting.
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        cmap : str | cmap object
+            The color map to be used. Defaults to ``'RdBu_r'``.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        colorbar : bool
+            If True, the colorbar of the figure is displayed. Defaults to True.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
+                               tlim=tlim, ax=ax, cmap=cmap, show=show,
+                               colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)
+
+    def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
+                      ymax=None, ax=None, show=True, color=None,
+                      xlabel=True, ylabel=True, legend=True, chance=True,
+                      label='Classif. score'):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot each classifier score trained and tested at identical time
+        windows.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float | None, optional
+            Min score value. If None, sets to min(scores).
+        ymax : float | None, optional
+            Max score value. If None, sets to max(scores).
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str
+            Score line color.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        return plot_gat_times(self, train_time='diagonal', title=title,
+                              xmin=xmin, xmax=xmax,
+                              ymin=ymin, ymax=ymax, ax=ax, show=show,
+                              color=color, xlabel=xlabel, ylabel=ylabel,
+                              legend=legend, chance=chance, label=label)
+
+    def plot_times(self, train_time, title=None, xmin=None, xmax=None,
+                   ymin=None, ymax=None, ax=None, show=True, color=None,
+                   xlabel=True, ylabel=True, legend=True, chance=True,
+                   label='Classif. score'):
+        """Plotting function of GeneralizationAcrossTime object
+
+        Plot the scores of the classifier trained at specific training time(s).
+
+        Parameters
+        ----------
+        train_time : float | list or array of float
+            Plots scores of the classifier trained at train_time.
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float | None, optional
+            Min score value. If None, sets to min(scores).
+        ymax : float | None, optional
+            Max score value. If None, sets to max(scores).
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str or list of str
+            Score line color(s).
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        if (not isinstance(train_time, float) and
+            not (isinstance(train_time, (list, np.ndarray)) and
+                 np.all([isinstance(time, float) for time in train_time]))):
+            raise ValueError('train_time must be float | list or array of '
+                             'floats. Got %s.' % type(train_time))
+
+        return plot_gat_times(self, train_time=train_time, title=title,
+                              xmin=xmin, xmax=xmax,
+                              ymin=ymin, ymax=ymax, ax=ax, show=show,
+                              color=color, xlabel=xlabel, ylabel=ylabel,
+                              legend=legend, chance=chance, label=label)
+
+
+class TimeDecoding(_GeneralizationAcrossTime):
+    """Train and test a series of classifiers at each time point to obtain a
+    score across time.
+
+    Parameters
+    ----------
+    picks : array-like of int | None
+        The channels indices to include. If None the data
+        channels in info, except bad channels, are used.
+    cv : int | object
+        If an integer is passed, it is the number of folds.
+        Specific cross-validation objects can be passed, see
+        scikit-learn.cross_validation module for the list of possible objects.
+        Defaults to 5.
+    clf : object | None
+        An estimator compliant with the scikit-learn API (fit & predict).
+        If None the classifier will be a standard pipeline including
+        StandardScaler and a Logistic Regression with default parameters.
+    times : dict | None
+        A dictionary to configure the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``start`` : float
+                Time at which to start decoding (in seconds). By default,
+                min(epochs.times).
+            ``stop`` : float
+                Maximal time at which to stop decoding (in seconds). By
+                default, max(times).
+            ``step`` : float
+                Duration separating the start of subsequent classifiers (in
+                seconds). By default, equals one time sample.
+            ``length`` : float
+                Duration of each classifier (in seconds). By default, equals
+                one time sample.
+
+        If None, empty dict.
+    predict_mode : {'cross-validation', 'mean-prediction'}
+        Indicates how predictions are achieved with regard to the cross-
+        validation procedure:
+
+            ``cross-validation`` : estimates a single prediction per sample
+                based on the unique independent classifier fitted in the
+                cross-validation.
+            ``mean-prediction`` : estimates k predictions per sample, based on
+                each of the k-fold cross-validation classifiers, and averages
+                these predictions into a single estimate per sample.
+
+        Default: 'cross-validation'
+    scorer : object | None
+        scikit-learn Scorer instance. If None, set to accuracy_score.
+    n_jobs : int
+        Number of jobs to run in parallel. Defaults to 1.
+
+    Attributes
+    ----------
+    picks_ : array-like of int | None
+        The channel indices to include.
+    ch_names : list, array-like, shape (n_channels,)
+        Names of the channels used for training.
+    y_train_ : ndarray, shape (n_samples,)
+        The categories used for training.
+    times_ : dict
+        A dictionary that configures the training times:
+
+            ``slices`` : ndarray, shape (n_clfs,)
+                Array of time slices (in indices) used for each classifier.
+                If not given, computed from 'start', 'stop', 'length', 'step'.
+            ``times`` : ndarray, shape (n_clfs,)
+                The training times (in seconds).
+
+    cv_ : CrossValidation object
+        The actual CrossValidation input depending on y.
+    estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
+        The estimators for each time point and each fold.
+    y_pred_ : ndarray, shape (n_times, n_epochs, n_prediction_dims)
+        Class labels for samples in X.
+    y_true_ : list | ndarray, shape (n_samples,)
+        The categories used for scoring ``y_pred_``.
+    scorer_ : object
+        scikit-learn Scorer instance.
+    scores_ : list of float, shape (n_times,)
+        The scores (mean accuracy of self.predict(X) with respect to y).
+
+    See Also
+    --------
+    GeneralizationAcrossTime
+
+    Notes
+    -----
+    The class is equivalent to the diagonal of GeneralizationAcrossTime().
+
+    .. versionadded:: 0.10
     """
-    from sklearn.base import clone
-    from sklearn.utils import check_random_state
-    from sklearn.svm import SVC
-    from sklearn.pipeline import Pipeline
-    from sklearn.preprocessing import StandardScaler
-    from sklearn.cross_validation import check_cv
-
-    if clf is None:
-        scaler = StandardScaler()
-        svc = SVC(C=1, kernel='linear')
-        clf = Pipeline([('scaler', scaler), ('svc', svc)])
-
-    info = epochs_list[0].info
-    data_picks = pick_types(info, meg=True, eeg=True, exclude='bads')
-
-    # Make arrays X and y such that :
-    # X is 3d with X.shape[0] is the total number of epochs to classify
-    # y is filled with integers coding for the class to predict
-    # We must have X.shape[0] equal to y.shape[0]
-    X = [e.get_data()[:, data_picks, :] for e in epochs_list]
-    y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
-    X = np.concatenate(X)
-    y = np.concatenate(y)
-
-    cv = check_cv(cv, X, y, classifier=True)
-
-    ch_types = set([channel_type(info, idx) for idx in data_picks])
-    logger.info('Running time generalization on %s epochs using %s.' %
-                (len(X), ch_types.pop()))
-
-    if shuffle:
-        rng = check_random_state(random_state)
-        order = np.argsort(rng.randn(len(X)))
-        X = X[order]
-        y = y[order]
-
-    parallel, p_time_gen, _ = parallel_func(_time_gen_one_fold, n_jobs)
-    scores = parallel(p_time_gen(clone(clf), X, y, train, test, scoring)
-                      for train, test in cv)
-    scores = np.mean(scores, axis=0)
-    return scores
+
+    def __init__(self, picks=None, cv=5, clf=None, times=None,
+                 predict_mode='cross-validation', scorer=None, n_jobs=1):
+        super(TimeDecoding, self).__init__(picks=picks, cv=cv, clf=clf,
+                                           train_times=times,
+                                           test_times='diagonal',
+                                           predict_mode=predict_mode,
+                                           scorer=scorer, n_jobs=n_jobs)
+        self._clean_times()
+
+    def __repr__(self):
+        s = ''
+        if hasattr(self, "estimators_"):
+            s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
+                self.times_['start'], self.times_['stop'])
+        else:
+            s += 'no fit'
+        if hasattr(self, 'y_pred_'):
+            s += (", predicted %d epochs" % len(self.y_pred_[0]))
+        else:
+            s += ", no prediction"
+        if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
+            s += ',\n '
+        else:
+            s += ', '
+        if hasattr(self, 'scores_'):
+            s += "scored"
+            if callable(self.scorer_):
+                s += " (%s)" % (self.scorer_.__name__)
+        else:
+            s += "no score"
+
+        return "<TimeDecoding | %s>" % s
+
+    def fit(self, epochs, y=None):
+        """ Train a classifier on each specified time slice.
+
+        Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
+        ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs.
+        y : list or ndarray of int, shape (n_samples,) or None, optional
+            To-be-fitted model values. If None, y = epochs.events[:, 2].
+
+        Returns
+        -------
+        self : TimeDecoding
+            Returns fitted TimeDecoding object.
+
+        Notes
+        -----
+        If X and y are not C-ordered and contiguous arrays of np.float64 and
+        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
+
+        If X is a dense array, then the other methods will not support sparse
+        matrices as input.
+        """
+        self._prep_times()
+        super(TimeDecoding, self).fit(epochs, y=y)
+        self._clean_times()
+        return self
+
+    def predict(self, epochs):
+        """ Test each classifier on each specified testing time slice.
+
+        .. note:: This function sets the ``y_pred_`` and ``test_times_``
+                  attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs
+            The epochs. Can be similar to fitted epochs or not. See
+            predict_mode parameter.
+
+        Returns
+        -------
+        y_pred : list of lists of arrays of floats, shape (n_times, n_epochs, n_prediction_dims)
+            The single-trial predictions at each time sample.
+        """  # noqa
+        self._prep_times()
+        super(TimeDecoding, self).predict(epochs)
+        self._clean_times()
+        return self.y_pred_
+
+    def score(self, epochs=None, y=None):
+        """Score Epochs
+
+        Estimate scores across trials by comparing the prediction estimated for
+        each trial to its true value.
+
+        Calls ``predict()`` if it has not been called already.
+
+        Note. The function updates the ``scorer_``, ``scores_``, and
+        ``y_true_`` attributes.
+
+        Parameters
+        ----------
+        epochs : instance of Epochs | None, optional
+            The epochs. Can be similar to fitted epochs or not.
+            If None, the predictions ``y_pred_`` generated with
+            ``predict()`` are used instead.
+        y : list | ndarray, shape (n_epochs,) | None, optional
+            True values to be compared with the predictions ``y_pred_``
+            generated with ``predict()`` via ``scorer_``.
+            If None and ``predict_mode`` is 'cross-validation', y is set
+            to ``y_train_``.
+
+        Returns
+        -------
+        scores : list of float, shape (n_times,)
+            The scores estimated by ``scorer_`` at each time sample (e.g. mean
+            accuracy of ``predict(X)``).
+        """
+        if epochs is not None:
+            self.predict(epochs)
+        else:
+            if not hasattr(self, 'y_pred_'):
+                raise RuntimeError('Please predict() epochs first or pass '
+                                   'epochs to score()')
+        self._prep_times()
+        super(TimeDecoding, self).score(epochs=None, y=y)
+        self._clean_times()
+        return self.scores_
+
+    def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,
+             ax=None, show=True, color=None, xlabel=True, ylabel=True,
+             legend=True, chance=True, label='Classif. score'):
+        """Plotting function
+
+        Predict each classifier. If multiple classifiers are passed, average
+        prediction across all classifiers to result in a single prediction per
+        classifier.
+
+        Parameters
+        ----------
+        title : str | None
+            Figure title.
+        xmin : float | None, optional
+            Min time value.
+        xmax : float | None, optional
+            Max time value.
+        ymin : float
+            Min score value. Defaults to 0.
+        ymax : float
+            Max score value. Defaults to 1.
+        ax : object | None
+            Instance of matplotlib.axes.Axes. If None, generate new figure.
+        show : bool
+            If True, the figure will be shown. Defaults to True.
+        color : str
+            Score line color. Defaults to 'steelblue'.
+        xlabel : bool
+            If True, the xlabel is displayed. Defaults to True.
+        ylabel : bool
+            If True, the ylabel is displayed. Defaults to True.
+        legend : bool
+            If True, a legend is displayed. Defaults to True.
+        chance : bool | float
+            Plot chance level. If True, chance level is estimated from the
+            type of scorer. Defaults to True.
+        label : str
+            Score label used in the legend. Defaults to 'Classif. score'.
+
+        Returns
+        -------
+        fig : instance of matplotlib.figure.Figure
+            The figure.
+        """
+        # XXX JRK: need cleanup in viz
+        self._prep_times()
+        fig = plot_gat_times(self, train_time='diagonal', title=title,
+                             xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,
+                             show=show, color=color, xlabel=xlabel,
+                             ylabel=ylabel, legend=legend, chance=chance,
+                             label=label)
+        self._clean_times()
+        return fig
+
+    def _prep_times(self):
+        """Auxiliary function to allow compability with GAT"""
+        self.test_times = 'diagonal'
+        if hasattr(self, 'times'):
+            self.train_times = self.times
+        if hasattr(self, 'times_'):
+            self.train_times_ = self.times_
+            self.test_times_ = _DecodingTime()
+            self.test_times_['slices'] = [[slic] for slic in
+                                          self.train_times_['slices']]
+            self.test_times_['times'] = [[tim] for tim in
+                                         self.train_times_['times']]
+        if hasattr(self, 'scores_'):
+            self.scores_ = [[score] for score in self.scores_]
+        if hasattr(self, 'y_pred_'):
+            self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]
+
+    def _clean_times(self):
+        """Auxiliary function to allow compability with GAT"""
+        if hasattr(self, 'train_times'):
+            self.times = self.train_times
+        if hasattr(self, 'train_times_'):
+            self.times_ = self.train_times_
+        for attr in ['test_times', 'train_times',
+                     'test_times_', 'train_times_']:
+            if hasattr(self, attr):
+                delattr(self, attr)
+        if hasattr(self, 'y_pred_'):
+            self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]
+        if hasattr(self, 'scores_'):
+            self.scores_ = [score[0] for score in self.scores_]
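A minimal usage sketch of the new class (hypothetical ``epochs`` with two
event types; defaults follow the docstring above)::

    from mne.decoding import TimeDecoding

    td = TimeDecoding()        # 5-fold CV, StandardScaler + logistic regression
    td.fit(epochs)             # y defaults to epochs.events[:, 2]
    scores = td.score(epochs)  # one score per time sample
    td.plot(title='Decoding over time')
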
diff --git a/mne/decoding/classifier.py b/mne/decoding/transformer.py
similarity index 68%
rename from mne/decoding/classifier.py
rename to mne/decoding/transformer.py
index 0f97357..27950cd 100644
--- a/mne/decoding/classifier.py
+++ b/mne/decoding/transformer.py
@@ -1,5 +1,6 @@
 # Authors: Mainak Jas <mainak at neuro.hut.fi>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Romain Trachel <trachelr at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -12,7 +13,7 @@ from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
                       band_stop_filter)
 from ..time_frequency import multitaper_psd
 from ..externals import six
-from ..utils import _check_type_picks
+from ..utils import _check_type_picks, deprecated
 
 
 class Scaler(TransformerMixin):
@@ -20,8 +21,8 @@ class Scaler(TransformerMixin):
 
     Parameters
     ----------
-    info : dict
-        measurement info
+    info : instance of Info
+        The measurement info
     with_mean : boolean, True by default
         If True, center the data before scaling.
     with_std : boolean, True by default
@@ -30,23 +31,28 @@ class Scaler(TransformerMixin):
 
     Attributes
     ----------
-    `scale_` : dict
+    info : instance of Info
+        The measurement info
+    ch_mean_ : dict
         The mean value for each channel type
-    `ch_std_` : array
+    std_ : dict
         The standard deviation for each channel type
      """
     def __init__(self, info, with_mean=True, with_std=True):
         self.info = info
         self.with_mean = with_mean
         self.with_std = with_std
+        self.ch_mean_ = dict()  # TODO rename attribute
+        self.std_ = dict()  # TODO rename attribute
 
     def fit(self, epochs_data, y):
-        """
+        """Standardizes data across channels
+
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data to concatenate channels.
-        y : array
+        y : array, shape (n_epochs,)
             The label for each epoch.
 
         Returns
@@ -70,14 +76,13 @@ class Scaler(TransformerMixin):
 
         self.picks_list_ = picks_list
 
-        self.ch_mean_, self.std_ = dict(), dict()
         for key, this_pick in picks_list.items():
             if self.with_mean:
                 ch_mean = X[:, this_pick, :].mean(axis=1)[:, None, :]
-                self.ch_mean_[key] = ch_mean
+                self.ch_mean_[key] = ch_mean  # TODO rename attribute
             if self.with_std:
                 ch_std = X[:, this_pick, :].std(axis=1)[:, None, :]
-                self.std_[key] = ch_std
+                self.std_[key] = ch_std  # TODO rename attribute
 
         return self
 
@@ -86,15 +91,17 @@ class Scaler(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None, not used. Defaults to None.
 
         Returns
         -------
-        X : array of shape (n_epochs, n_channels * n_times)
+        X : array, shape (n_epochs, n_channels, n_times)
             The data concatenated over channels.
         """
-
         if not isinstance(epochs_data, np.ndarray):
             raise ValueError("epochs_data should be of type ndarray (got %s)."
                              % type(epochs_data))
@@ -109,27 +116,67 @@ class Scaler(TransformerMixin):
 
         return X
 
+    def inverse_transform(self, epochs_data, y=None):
+        """ Inverse standardization of data across channels
+
+        Parameters
+        ----------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None, not used. Defaults to None.
+
+        Returns
+        -------
+        X : array, shape (n_epochs, n_channels, n_times)
+            The data concatenated over channels.
+        """
+        if not isinstance(epochs_data, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(epochs_data))
+
+        X = np.atleast_3d(epochs_data)
+
+        for key, this_pick in six.iteritems(self.picks_list_):
+            if self.with_mean:
+                X[:, this_pick, :] += self.ch_mean_[key]
+            if self.with_std:
+                X[:, this_pick, :] *= self.std_[key]
+
+        return X
+
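A short sketch of the intended fit/transform/inverse_transform round trip
(hypothetical ``epochs`` object)::

    from mne.decoding import Scaler

    X = epochs.get_data()      # shape (n_epochs, n_channels, n_times)
    y = epochs.events[:, 2]
    scaler = Scaler(epochs.info)
    X_scaled = scaler.fit(X, y).transform(X)
    X_restored = scaler.inverse_transform(X_scaled)  # undoes the scaling
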
 
-class ConcatenateChannels(TransformerMixin):
-    """Concatenates data from different channels into a single feature vector
+class EpochsVectorizer(TransformerMixin):
+    """EpochsVectorizer transforms epoch data to fit into a scikit-learn pipeline.
 
     Parameters
     ----------
-    info : dict
+    info : instance of Info
         The measurement info.
+
+    Attributes
+    ----------
+    n_channels : int
+        The number of channels.
+    n_times : int
+        The number of time points.
+
     """
     def __init__(self, info=None):
         self.info = info
+        self.n_channels = None
+        self.n_times = None
 
     def fit(self, epochs_data, y):
-        """Concatenates data from different channels into a single feature
-        vector
+        """For each epoch, concatenate data from different channels into a single
+        feature vector.
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data to concatenate channels.
-        y : array
+        y : array, shape (n_epochs,)
             The label for each epoch.
 
         Returns
@@ -144,17 +191,20 @@ class ConcatenateChannels(TransformerMixin):
         return self
 
     def transform(self, epochs_data, y=None):
-        """Concatenates data from different channels into a single feature
-        vector
+        """For each epoch, concatenate data from different channels into a single
+        feature vector.
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None, not used. Defaults to None.
 
         Returns
         -------
-        X : array, shape (n_epochs, n_channels*n_times)
+        X : array, shape (n_epochs, n_channels * n_times)
             The data concatenated over channels
         """
         if not isinstance(epochs_data, np.ndarray):
@@ -165,9 +215,41 @@ class ConcatenateChannels(TransformerMixin):
 
         n_epochs, n_channels, n_times = epochs_data.shape
         X = epochs_data.reshape(n_epochs, n_channels * n_times)
+        # save attributes for inverse_transform
+        self.n_epochs = n_epochs
+        self.n_channels = n_channels
+        self.n_times = n_times
 
         return X
 
+    def inverse_transform(self, X, y=None):
+        """For each epoch, reshape a feature vector into the original data shape
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_channels * n_times)
+            The feature vector concatenated over channels
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None, not used. Defaults to None.
+
+        Returns
+        -------
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
+            The original data
+        """
+        if not isinstance(X, np.ndarray):
+            raise ValueError("epochs_data should be of type ndarray (got %s)."
+                             % type(X))
+
+        return X.reshape(-1, self.n_channels, self.n_times)
+
+
+ at deprecated("Class 'ConcatenateChannels' has been renamed to "
+            "'EpochsVectorizer' and will be removed in release 0.11.")
+class ConcatenateChannels(EpochsVectorizer):
+    pass
+
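A hedged sketch of the scikit-learn pipeline use the rename is meant to
suggest (hypothetical ``epochs``; the vectorizer flattens the 3-D epochs
array into 2-D for the estimator)::

    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from mne.decoding import EpochsVectorizer

    X = epochs.get_data()   # (n_epochs, n_channels, n_times)
    y = epochs.events[:, 2]
    clf = Pipeline([('vectorizer', EpochsVectorizer()),
                    ('logit', LogisticRegression())])
    clf.fit(X, y)           # vectorizer emits (n_epochs, n_channels * n_times)
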
 
 class PSDEstimator(TransformerMixin):
     """Compute power spectrum density (PSD) using a multi-taper method
@@ -198,8 +280,8 @@ class PSDEstimator(TransformerMixin):
         If not None, override default verbose level (see mne.verbose).
     """
     def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
-                 adaptive=False, low_bias=True, n_jobs=1, normalization='length',
-                 verbose=None):
+                 adaptive=False, low_bias=True, n_jobs=1,
+                 normalization='length', verbose=None):
         self.sfreq = sfreq
         self.fmin = fmin
         self.fmax = fmax
@@ -215,9 +297,9 @@ class PSDEstimator(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
-        y : array
+        y : array, shape (n_epochs,)
             The label for each epoch
 
         Returns
@@ -237,12 +319,15 @@ class PSDEstimator(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None not used. Defaults to None.
 
         Returns
         -------
-        psd : array, shape=(n_signals, len(freqs)) or (len(freqs),)
+        psd : array, shape (n_signals, len(freqs)) or (len(freqs),)
             The computed PSD.
         """
 
@@ -276,17 +361,18 @@ class FilterEstimator(TransformerMixin):
 
     l_freq and h_freq are the frequencies below which and above which,
     respectively, to filter out of the data. Thus the uses are:
-        l_freq < h_freq: band-pass filter
-        l_freq > h_freq: band-stop filter
-        l_freq is not None, h_freq is None: low-pass filter
-        l_freq is None, h_freq is not None: high-pass filter
 
-    Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
-          additional time points need to be temporarily stored in memory.
+        - l_freq < h_freq: band-pass filter
+        - l_freq > h_freq: band-stop filter
+        - l_freq is not None, h_freq is None: low-pass filter
+        - l_freq is None, h_freq is not None: high-pass filter
+
+    If n_jobs > 1, more memory is required as "len(picks) * n_times"
+    additional time points need to be temporarily stored in memory.
 
     Parameters
     ----------
-    info : dict
+    info : instance of Info
         Measurement info.
     l_freq : float | None
         Low cut-off frequency in Hz. If None the data are only low-passed.
@@ -323,7 +409,7 @@ class FilterEstimator(TransformerMixin):
     """
     def __init__(self, info, l_freq, h_freq, picks=None, filter_length='10s',
                  l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
-                 method='fft', iir_params=None):
+                 method='fft', iir_params=None, verbose=None):
         self.info = info
         self.l_freq = l_freq
         self.h_freq = h_freq
@@ -340,8 +426,10 @@ class FilterEstimator(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
+        y : array, shape (n_epochs,)
+            The label for each epoch.
 
         Returns
         -------
@@ -358,17 +446,25 @@ class FilterEstimator(TransformerMixin):
 
         if self.l_freq == 0:
             self.l_freq = None
-        if self.h_freq > (self.info['sfreq'] / 2.):
+        if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
             self.h_freq = None
-
-        if self.h_freq is not None and \
-                (self.l_freq is None or self.l_freq < self.h_freq) and \
-                self.h_freq < self.info['lowpass']:
+        if self.l_freq is not None and not isinstance(self.l_freq, float):
+            self.l_freq = float(self.l_freq)
+        if self.h_freq is not None and not isinstance(self.h_freq, float):
+            self.h_freq = float(self.h_freq)
+
+        if self.info['lowpass'] is None or (self.h_freq is not None and
+                                            (self.l_freq is None or
+                                             self.l_freq < self.h_freq) and
+                                            self.h_freq <
+                                            self.info['lowpass']):
             self.info['lowpass'] = self.h_freq
 
-        if self.l_freq is not None and \
-                (self.h_freq is None or self.l_freq < self.h_freq) and \
-                self.l_freq > self.info['highpass']:
+        if self.info['highpass'] is None or (self.l_freq is not None and
+                                             (self.h_freq is None or
+                                              self.l_freq < self.h_freq) and
+                                             self.l_freq >
+                                             self.info['highpass']):
             self.info['highpass'] = self.l_freq
 
         return self
@@ -378,12 +474,15 @@ class FilterEstimator(TransformerMixin):
 
         Parameters
         ----------
-        epochs_data : array, shape=(n_epochs, n_channels, n_times)
+        epochs_data : array, shape (n_epochs, n_channels, n_times)
             The data.
+        y : None | array, shape (n_epochs,)
+            The label for each epoch.
+            If None, not used. Defaults to None.
 
         Returns
         -------
-        X : array, shape=(n_epochs, n_channels, n_times)
+        X : array, shape (n_epochs, n_channels, n_times)
             The data after filtering
         """
         if not isinstance(epochs_data, np.ndarray):
@@ -394,7 +493,7 @@ class FilterEstimator(TransformerMixin):
 
         if self.l_freq is None and self.h_freq is not None:
             epochs_data = \
-                low_pass_filter(epochs_data, self.fs, self.h_freq,
+                low_pass_filter(epochs_data, self.info['sfreq'], self.h_freq,
                                 filter_length=self.filter_length,
                                 trans_bandwidth=self.l_trans_bandwidth,
                                 method=self.method, iir_params=self.iir_params,
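To make the l_freq/h_freq conventions concrete, a sketch of a 1-30 Hz
band-pass on hypothetical epochs data::

    from mne.decoding import FilterEstimator

    filt = FilterEstimator(epochs.info, l_freq=1., h_freq=30.)  # band-pass
    X = epochs.get_data()
    X_filtered = filt.fit(X, epochs.events[:, 2]).transform(X)
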
diff --git a/mne/defaults.py b/mne/defaults.py
new file mode 100644
index 0000000..6a58b47
--- /dev/null
+++ b/mne/defaults.py
@@ -0,0 +1,54 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+from copy import deepcopy
+
+DEFAULTS = dict(
+    color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
+               emg='k', ref_meg='steelblue', misc='k', stim='k',
+               resp='k', chpi='k', exci='k', ias='k', syst='k',
+               seeg='k'),
+    config_opts=dict(),
+    units=dict(eeg='uV', grad='fT/cm', mag='fT', eog='uV', misc='AU',
+               seeg='uV'),
+    scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6,
+                  misc=1.0, seeg=1e4),
+    scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
+                           eog=150e-6, ecg=5e-4, emg=1e-3,
+                           ref_meg=1e-12, misc=1e-3,
+                           stim=1, resp=1, chpi=1e-4, exci=1,
+                           ias=1, syst=1, seeg=1e-5),
+    scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5),
+    ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
+              eeg=(-200., 200.), misc=(-5., 5.),
+              seeg=(-200., 200.)),
+    titles=dict(eeg='EEG', grad='Gradiometers',
+                mag='Magnetometers', misc='misc', seeg='sEEG'),
+    mask_params=dict(marker='o',
+                     markerfacecolor='w',
+                     markeredgecolor='k',
+                     linewidth=0,
+                     markeredgewidth=1,
+                     markersize=4),
+)
+
+
+def _handle_default(k, v=None):
+    """Helper to avoid dicts as default keyword arguments
+
+    Use this function instead to resolve default dict values. Example usage::
+
+        scalings = _handle_default('scalings', scalings)
+
+    """
+    this_mapping = deepcopy(DEFAULTS[k])
+    if v is not None:
+        if isinstance(v, dict):
+            this_mapping.update(v)
+        else:
+            for key in this_mapping.keys():
+                this_mapping[key] = v
+    return this_mapping
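The override semantics in one sketch: a dict merges per key, a scalar
replaces every key, and the module-level DEFAULTS are never mutated::

    from mne.defaults import _handle_default

    full = _handle_default('scalings')                   # copy of the defaults
    merged = _handle_default('scalings', dict(eeg=1e5))  # only 'eeg' overridden
    flat = _handle_default('scalings', 2.0)              # every key set to 2.0
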
diff --git a/mne/dipole.py b/mne/dipole.py
index faf0c44..64a313f 100644
--- a/mne/dipole.py
+++ b/mne/dipole.py
@@ -1,14 +1,221 @@
-# Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Eric Larson <larson.eric.d at gmail.com>
 #
 # License: Simplified BSD
 
 import numpy as np
+from scipy import linalg
+from copy import deepcopy
+import re
 
-from .utils import logger, verbose
+from .cov import read_cov, _get_whitener_data
+from .io.pick import pick_types, channel_type
+from .io.proj import make_projector, _has_eeg_average_ref_proj
+from .bem import _fit_sphere
+from .transforms import (_print_coord_trans, _coord_frame_name,
+                         apply_trans, invert_transform, Transform)
 
+from .forward._make_forward import (_get_mri_head_t, _setup_bem,
+                                    _prep_meg_channels, _prep_eeg_channels)
+from .forward._compute_forward import (_compute_forwards_meeg,
+                                       _prep_field_computation)
+
+from .externals.six import string_types
+from .surface import (transform_surface_to, _normalize_vectors,
+                      _get_ico_surface, _compute_nearest)
+from .bem import _bem_find_surface, _bem_explain_surface
+from .source_space import (_make_volume_source_space, SourceSpaces,
+                           _points_outside_surface)
+from .parallel import parallel_func
+from .fixes import partial
+from .utils import logger, verbose, _time_mask
+
+
+class Dipole(object):
+    """Dipole class
+
+    Used to store positions, orientations, amplitudes, times, goodness of fit
+    of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
+    or certain inverse solvers.
+
+    Parameters
+    ----------
+    times : array, shape (n_dipoles,)
+        The time instants at which each dipole was fitted (sec).
+    pos : array, shape (n_dipoles, 3)
+        The dipoles positions (m).
+    amplitude : array, shape (n_dipoles,)
+        The amplitude of the dipoles (nAm).
+    ori : array, shape (n_dipoles, 3)
+        The dipole orientations (normalized to unit length).
+    gof : array, shape (n_dipoles,)
+        The goodness of fit.
+    name : str | None
+        Name of the dipole.
+    """
+    def __init__(self, times, pos, amplitude, ori, gof, name=None):
+        self.times = times
+        self.pos = pos
+        self.amplitude = amplitude
+        self.ori = ori
+        self.gof = gof
+        self.name = name
+
+    def __repr__(self):
+        s = "n_times : %s" % len(self.times)
+        s += ", tmin : %s" % np.min(self.times)
+        s += ", tmax : %s" % np.max(self.times)
+        return "<Dipole  |  %s>" % s
+
+    def save(self, fname):
+        """Save dipole in a .dip file
+
+        Parameters
+        ----------
+        fname : str
+            The name of the .dip file.
+        """
+        fmt = "  %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
+        with open(fname, 'wb') as fid:
+            fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
+            fid.write('#   begin     end   X (mm)   Y (mm)   Z (mm)'
+                      '   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)    g/%\n'
+                      .encode('utf-8'))
+            t = self.times[:, np.newaxis] * 1000.
+            gof = self.gof[:, np.newaxis]
+            amp = 1e9 * self.amplitude[:, np.newaxis]
+            out = np.concatenate((t, t, self.pos / 1e-3, amp,
+                                  self.ori * amp, gof), axis=-1)
+            np.savetxt(fid, out, fmt=fmt)
+            if self.name is not None:
+                fid.write(('## Name "%s dipoles" Style "Dipoles"'
+                           % self.name).encode('utf-8'))
+
+    def crop(self, tmin=None, tmax=None):
+        """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        """
+        mask = _time_mask(self.times, tmin, tmax)
+        for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
+            setattr(self, attr, getattr(self, attr)[mask])
+
+    def copy(self):
+        """Copy the Dipoles object
+
+        Returns
+        -------
+        dip : instance of Dipole
+            The copied dipole instance.
+        """
+        return deepcopy(self)
+
+    @verbose
+    def plot_locations(self, trans, subject, subjects_dir=None,
+                       bgcolor=(1, 1, 1), opacity=0.3,
+                       brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
+                       fig_name=None, fig_size=(600, 600), mode='cone',
+                       scale_factor=0.1e-1, colors=None, verbose=None):
+        """Plot dipole locations as arrows
+
+        Parameters
+        ----------
+        trans : dict
+            The mri to head trans.
+        subject : str
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT.
+        subjects_dir : None | str
+            The path to the freesurfer subjects reconstructions.
+            It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+            The default is None.
+        bgcolor : tuple of length 3
+            Background color in 3D.
+        opacity : float in [0, 1]
+            Opacity of brain mesh.
+        brain_color : tuple of length 3
+            Brain color.
+        mesh_color : tuple of length 3
+            Mesh color.
+        fig_name : str
+            Mayavi figure name.
+        fig_size : tuple of length 2
+            Mayavi figure size.
+        mode : str
+            Should be ``'cone'`` or ``'sphere'`` to specify how the
+            dipoles should be shown.
+        scale_factor : float
+            The scaling applied to amplitudes for the plot.
+        colors : list of colors | None
+            Color to plot with each dipole. If None, default colors are used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : instance of mlab.Figure
+            The mayavi figure.
+        """
+        from .viz import plot_dipole_locations
+        dipoles = []
+        for t in self.times:
+            dipoles.append(self.copy())
+            dipoles[-1].crop(t, t)
+        return plot_dipole_locations(
+            dipoles, trans, subject, subjects_dir, bgcolor, opacity,
+            brain_color, mesh_color, fig_name, fig_size, mode, scale_factor,
+            colors)
+
+    def plot_amplitudes(self, color='k', show=True):
+        """Plot the dipole amplitudes as a function of time
+
+        Parameters
+        ----------
+        color : matplotlib color
+            Color to use for the trace.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure object containing the plot.
+        """
+        from .viz import plot_dipole_amplitudes
+        return plot_dipole_amplitudes([self], [color], show)
+
+    def __getitem__(self, idx_slice):
+        """Handle indexing"""
+        if isinstance(idx_slice, int):  # make sure attributes stay 2d
+            idx_slice = [idx_slice]
+
+        selected_times = self.times[idx_slice].copy()
+        selected_pos = self.pos[idx_slice, :].copy()
+        selected_amplitude = self.amplitude[idx_slice].copy()
+        selected_ori = self.ori[idx_slice, :].copy()
+        selected_gof = self.gof[idx_slice].copy()
+        selected_name = self.name
+
+        new_dipole = Dipole(selected_times, selected_pos,
+                            selected_amplitude, selected_ori,
+                            selected_gof, selected_name)
+        return new_dipole
+
+    def __len__(self):
+        """Handle len function"""
+        return self.pos.shape[0]
+
+
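A toy sketch of the container API (synthetic values, units per the
docstring above)::

    import numpy as np
    from mne.dipole import Dipole

    times = np.array([0.1, 0.2])         # s
    pos = np.zeros((2, 3))               # m
    amplitude = np.array([5e-8, 6e-8])
    ori = np.tile([0., 0., 1.], (2, 1))  # unit-length orientations
    gof = np.array([85., 90.])
    dip = Dipole(times, pos, amplitude, ori, gof, name='demo')
    dip.crop(tmin=0.15)                  # keep only the second time point
    assert len(dip) == 1
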
+# #############################################################################
+# IO
 
 @verbose
-def read_dip(fname, verbose=None):
+def read_dipole(fname, verbose=None):
     """Read .dip file from Neuromag/xfit or MNE
 
     Parameters
@@ -20,27 +227,494 @@ def read_dip(fname, verbose=None):
 
     Returns
     -------
-    time : array, shape=(n_dipoles,)
-        The time instants at which each dipole was fitted.
-    pos : array, shape=(n_dipoles, 3)
-        The dipoles positions in meters
-    amplitude : array, shape=(n_dipoles,)
-        The amplitude of the dipoles in nAm
-    ori : array, shape=(n_dipoles, 3)
-        The dipolar moments. Amplitude of the moment is in nAm.
-    gof : array, shape=(n_dipoles,)
-        The goodness of fit
+    dipole : instance of Dipole
+        The dipole.
     """
     try:
         data = np.loadtxt(fname, comments='%')
     except:
         data = np.loadtxt(fname, comments='#')  # handle 2 types of comments...
+    name = None
+    with open(fname, 'r') as fid:
+        for line in fid.readlines():
+            if line.startswith('##') or line.startswith('%%'):
+                m = re.search('Name "(.*) dipoles"', line)
+                if m:
+                    name = m.group(1)
+                    break
     if data.ndim == 1:
         data = data[None, :]
     logger.info("%d dipole(s) found" % len(data))
-    time = data[:, 0]
+    times = data[:, 0] / 1000.
     pos = 1e-3 * data[:, 2:5]  # put data in meters
     amplitude = data[:, 5]
-    ori = data[:, 6:9]
+    norm = amplitude.copy()
+    amplitude /= 1e9
+    norm[norm == 0] = 1
+    ori = data[:, 6:9] / norm[:, np.newaxis]
     gof = data[:, 9]
-    return time, pos, amplitude, ori, gof
+    return Dipole(times, pos, amplitude, ori, gof, name)
+
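The reader now returns the container above rather than a tuple; a sketch
with a hypothetical file name::

    from mne.dipole import read_dipole

    dip = read_dipole('fitted-dipoles.dip')  # hypothetical .dip path
    print(dip)                               # <Dipole  |  n_times : ...>
    print(dip.pos[:3])                       # positions in meters
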
+
+# #############################################################################
+# Fitting
+
+def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
+    """Compute the forward solution and do other nice stuff"""
+    B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
+    B = np.concatenate(B, axis=1)
+    B_orig = B.copy()
+
+    # Apply projection and whiten (cov has projections already)
+    B = np.dot(B, whitener.T)
+
+    # column normalization doesn't affect our fitting, so skip for now
+    # S = np.sum(B * B, axis=1)  # across channels
+    # scales = np.repeat(3. / np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),
+    #                                        axis=1)), 3)
+    # B *= scales[:, np.newaxis]
+    scales = np.ones(3)
+    return B, B_orig, scales
+
+
+def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
+    """Make a guess space inside a sphere or BEM surface"""
+    if isinstance(surf_or_rad, dict):
+        surf = surf_or_rad
+        logger.info('Guess surface (%s) is in %s coordinates'
+                    % (_bem_explain_surface(surf['id']),
+                       _coord_frame_name(surf['coord_frame'])))
+    else:
+        radius = surf_or_rad[0]
+        logger.info('Making a spherical guess space with radius %7.1f mm...'
+                    % (1000 * radius))
+        surf = _get_ico_surface(3)
+        _normalize_vectors(surf['rr'])
+        surf['rr'] *= radius
+        surf['rr'] += r0
+    logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
+    src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
+                                    do_neighbors=False, n_jobs=n_jobs)
+    # simplify the result to make things easier later
+    src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
+               nuse=src['nuse'], coord_frame=src['coord_frame'],
+               vertno=np.arange(src['nuse']))
+    return SourceSpaces([src])
+
+
+def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
+    """Calculate the residual sum of squares"""
+    if fwd_svd is None:
+        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
+        uu, sing, vv = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
+    else:
+        uu, sing, vv = fwd_svd
+    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
+    # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
+    return 1. - gof
+
+
+def _dipole_gof(uu, sing, vv, B, B2):
+    """Calculate the goodness of fit from the forward SVD"""
+    ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
+    one = np.dot(vv[:ncomp], B)
+    Bm2 = np.sum(one * one)
+    gof = Bm2 / B2
+    return gof, one
+
+
+def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd):
+    """Fit the dipole moment once the location is known"""
+    fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
+                                             rd[np.newaxis, :])
+    uu, sing, vv = linalg.svd(fwd, full_matrices=False)
+    gof, one = _dipole_gof(uu, sing, vv, B, B2)
+    ncomp = len(one)
+    # Counteract the effect of column normalization
+    Q = scales[0] * np.sum(uu.T[:ncomp] * (one / sing[:ncomp])[:, np.newaxis],
+                           axis=0)
+    # apply the projector to both elements
+    B_residual = np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig),
+                                                  proj_op.T)
+    return Q, gof, B_residual
+
+
+def _fit_dipoles(min_dist_to_inner_skull, data, times, guess_rrs,
+                 guess_fwd_svd, fwd_data, whitener, proj_op, n_jobs):
+    """Fit a single dipole to the given whitened, projected data"""
+    from scipy.optimize import fmin_cobyla
+    parallel, p_fun, _ = parallel_func(_fit_dipole, n_jobs)
+    # parallel over time points
+    res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
+                         guess_fwd_svd, fwd_data, whitener, proj_op,
+                         fmin_cobyla)
+                   for B, t in zip(data.T, times))
+    pos = np.array([r[0] for r in res])
+    amp = np.array([r[1] for r in res])
+    ori = np.array([r[2] for r in res])
+    gof = np.array([r[3] for r in res]) * 100  # convert to percentage
+    residual = np.array([r[4] for r in res]).T
+
+    return pos, amp, ori, gof, residual
+
+
+'''Simplex code in case we ever want/need it for testing
+
+def _make_tetra_simplex():
+    """Make the initial tetrahedron"""
+    #
+    # For this definition of a regular tetrahedron, see
+    #
+    # http://mathworld.wolfram.com/Tetrahedron.html
+    #
+    x = np.sqrt(3.0) / 3.0
+    r = np.sqrt(6.0) / 12.0
+    R = 3 * r
+    d = x / 2.0
+    simplex = 1e-2 * np.array([[x, 0.0, -r],
+                               [-d, 0.5, -r],
+                               [-d, -0.5, -r],
+                               [0., 0., R]])
+    return simplex
+
+
+def try_(p, y, psum, ndim, fun, ihi, neval, fac):
+    """Helper to try a value"""
+    ptry = np.empty(ndim)
+    fac1 = (1.0 - fac) / ndim
+    fac2 = fac1 - fac
+    ptry = psum * fac1 - p[ihi] * fac2
+    ytry = fun(ptry)
+    neval += 1
+    if ytry < y[ihi]:
+        y[ihi] = ytry
+        psum[:] += ptry - p[ihi]
+        p[ihi] = ptry
+    return ytry, neval
+
+
+def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
+    """Minimization with the simplex algorithm
+
+    Modified from Numerical recipes"""
+    y = np.array([fun(s) for s in p])
+    ndim = p.shape[1]
+    assert p.shape[0] == ndim + 1
+    mpts = ndim + 1
+    neval = 0
+    psum = p.sum(axis=0)
+
+    loop = 1
+    while(True):
+        ilo = 1
+        if y[1] > y[2]:
+            ihi = 1
+            inhi = 2
+        else:
+            ihi = 2
+            inhi = 1
+        for i in range(mpts):
+            if y[i] < y[ilo]:
+                ilo = i
+            if y[i] > y[ihi]:
+                inhi = ihi
+                ihi = i
+            elif y[i] > y[inhi]:
+                if i != ihi:
+                    inhi = i
+
+        rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
+        if rtol < ftol:
+            break
+        if neval >= max_eval:
+            raise RuntimeError('Maximum number of evaluations exceeded.')
+        if stol > 0:  # Has the simplex collapsed?
+            dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
+            if loop > 5 and dsum < stol:
+                break
+
+        ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
+        if ytry <= y[ilo]:
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
+        elif ytry >= y[inhi]:
+            ysave = y[ihi]
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
+            if ytry >= ysave:
+                for i in range(mpts):
+                    if i != ilo:
+                        psum[:] = 0.5 * (p[i] + p[ilo])
+                        p[i] = psum
+                        y[i] = fun(psum)
+                neval += ndim
+                psum = p.sum(axis=0)
+        loop += 1
+'''
+
+
+def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
+                guess_fwd_svd, fwd_data, whitener, proj_op,
+                fmin_cobyla):
+    """Fit a single bit of data"""
+    B = np.dot(whitener, B_orig)
+
+    # make constraint function to keep the solver within the inner skull
+    if isinstance(fwd_data['inner_skull'], dict):  # bem
+        surf = fwd_data['inner_skull']
+
+        def constraint(rd):
+
+            dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
+                                    return_dists=True)[1][0]
+
+            if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
+                dist *= -1.
+
+            # Once we know the dipole is below the inner skull,
+            # let's check if its distance to the inner skull is at least
+            # min_dist_to_inner_skull. This can be enforced by adding a
+            # constraint proportional to its distance.
+            dist -= min_dist_to_inner_skull
+            return dist
+
+    else:  # sphere
+        surf = None
+        R, r0 = fwd_data['inner_skull']
+        R_adj = R - min_dist_to_inner_skull
+
+        def constraint(rd):
+            return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
+
+    # Find a good starting point (find_best_guess in C)
+    B2 = np.dot(B, B)
+    if B2 == 0:
+        logger.warning('Zero field found for time %s' % t)
+        # also return a zero residual so callers can unpack five values
+        return np.zeros(3), 0, np.zeros(3), 0, np.zeros_like(B_orig)
+
+    idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
+                     for fi, fwd_svd in enumerate(guess_fwd_svd)])
+    x0 = guess_rrs[idx]
+    fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
+
+    # Tested minimizers:
+    #    Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
+    # Several were similar, but COBYLA won for having a handy constraint
+    # function we can use to ensure we stay inside the inner skull /
+    # smallest sphere
+    rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
+                           rhobeg=5e-2, rhoend=5e-5, disp=False)
+
+    # simplex = _make_tetra_simplex() + x0
+    # _simplex_minimize(simplex, 1e-4, 2e-4, fun)
+    # rd_final = simplex[0]
+
+    # Compute the dipole moment at the final point
+    Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
+                              rd_final)
+    amp = np.sqrt(np.dot(Q, Q))
+    norm = 1. if amp == 0. else amp
+    ori = Q / norm
+
+    msg = '---- Fitted : %7.1f ms' % (1000. * t)
+    if surf is not None:
+        dist_to_inner_skull = _compute_nearest(surf['rr'],
+                                               rd_final[np.newaxis, :],
+                                               return_dists=True)[1][0]
+        msg += (", distance to inner skull : %2.4f mm"
+                % (dist_to_inner_skull * 1000.))
+
+    logger.info(msg)
+    return rd_final, amp, ori, gof, residual
+
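The constraint trick can be seen in isolation with a toy problem (plain
SciPy, not MNE code): minimize the squared distance to a target while
staying inside a sphere, mirroring the sphere branch above::

    import numpy as np
    from scipy.optimize import fmin_cobyla

    target = np.array([0.1, 0., 0.])

    def fun(rd):
        return np.sum((rd - target) ** 2)      # objective to minimize

    def constraint(rd):                        # >= 0 iff inside the sphere
        return 0.08 - np.sqrt(np.sum(rd ** 2))

    rd = fmin_cobyla(fun, np.zeros(3), (constraint,),
                     rhobeg=5e-2, rhoend=5e-5, disp=False)
    # rd converges to the sphere-surface point nearest the target
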
+
+@verbose
+def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
+               verbose=None):
+    """Fit a dipole
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The dataset to fit.
+    cov : str | instance of Covariance
+        The noise covariance.
+    bem : str | dict
+        The BEM filename (str) or a loaded sphere model (dict).
+    trans : str | None
+        The head<->MRI transform filename. Must be provided unless BEM
+        is a sphere model.
+    min_dist : float
+        Minimum distance (in millimeters) from the dipole to the inner skull.
+        Must be positive. Note that because this is a constraint passed to
+        a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
+        fits could be 4.9 mm from the inner skull.
+    n_jobs : int
+        Number of jobs to run in parallel (used in field computation
+        and fitting).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    dip : instance of Dipole
+        The dipole fits.
+    residual : ndarray, shape (n_meeg_channels, n_times)
+        The good M-EEG data channels with the fitted dipolar activity
+        removed.
+
+    See Also
+    --------
+    mne.beamformer.rap_music
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    # This could eventually be adapted to work with other inputs, these
+    # are what is needed:
+
+    evoked = evoked.copy()
+
+    # Determine if a list of projectors has an average EEG ref
+    if "eeg" in evoked and not _has_eeg_average_ref_proj(evoked.info['projs']):
+        raise ValueError('EEG average reference is mandatory for dipole '
+                         'fitting.')
+
+    if min_dist < 0:
+        raise ValueError('min_dist should be positive. Got %s' % min_dist)
+
+    data = evoked.data
+    info = evoked.info
+    times = evoked.times.copy()
+    comment = evoked.comment
+
+    # Convert the min_dist to meters
+    min_dist_to_inner_skull = min_dist / 1000.
+    del min_dist
+
+    # Figure out our inputs
+    neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
+    if isinstance(bem, string_types):
+        logger.info('BEM              : %s' % bem)
+    if trans is not None:
+        logger.info('MRI transform    : %s' % trans)
+        mri_head_t, trans = _get_mri_head_t(trans)
+    else:
+        mri_head_t = Transform('head', 'mri', np.eye(4))
+    bem = _setup_bem(bem, bem, neeg, mri_head_t)
+    if not bem['is_sphere']:
+        if trans is None:
+            raise ValueError('trans must not be None if bem is not a '
+                             'sphere model')
+        # Find the best-fitting sphere
+        inner_skull = _bem_find_surface(bem, 'inner_skull')
+        inner_skull = inner_skull.copy()
+        R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
+        r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
+        logger.info('Grid origin      : '
+                    '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
+                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
+    else:
+        r0 = bem['r0']
+        logger.info('Sphere model     : origin at (% 7.2f % 7.2f % 7.2f) mm'
+                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2]))
+        if 'layers' in bem:
+            R = bem['layers'][0]['rad']
+        else:
+            R = np.inf
+        inner_skull = [R, r0]
+    r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
+                         r0[np.newaxis, :])[0]
+
+    # Eventually these could be parameters, but they are just used for
+    # the initial grid anyway
+    guess_grid = 0.02  # MNE-C uses 0.01, but this is faster w/similar perf
+    guess_mindist = max(0.005, min_dist_to_inner_skull)
+    guess_exclude = 0.02
+    accurate = False  # can be made an option later (shouldn't make big diff)
+
+    logger.info('Guess grid       : %6.1f mm' % (1000 * guess_grid,))
+    if guess_mindist > 0.0:
+        logger.info('Guess mindist    : %6.1f mm' % (1000 * guess_mindist,))
+    if guess_exclude > 0:
+        logger.info('Guess exclude    : %6.1f mm' % (1000 * guess_exclude,))
+    logger.info('Using %s MEG coil definitions.'
+                % ("accurate" if accurate else "standard"))
+    if isinstance(cov, string_types):
+        logger.info('Noise covariance : %s' % (cov,))
+        cov = read_cov(cov, verbose=False)
+    logger.info('')
+
+    _print_coord_trans(mri_head_t)
+    _print_coord_trans(info['dev_head_t'])
+    logger.info('%d bad channels total' % len(info['bads']))
+
+    # Forward model setup (setup_forward_model from setup.c)
+    ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
+
+    megcoils, compcoils, megnames, meg_info = [], [], [], None
+    eegels, eegnames = [], []
+    if 'grad' in ch_types or 'mag' in ch_types:
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, exclude='bads',
+                               accurate=accurate, verbose=verbose)
+    if 'eeg' in ch_types:
+        eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
+                                              verbose=verbose)
+
+    # Ensure that MEG and/or EEG channels are present
+    if len(megcoils + eegels) == 0:
+        raise RuntimeError('No MEG or EEG channels found.')
+
+    # Whitener for the data
+    logger.info('Decomposing the sensor noise covariance matrix...')
+    picks = pick_types(info, meg=True, eeg=True)
+
+    # In case we want to more closely match MNE-C for debugging:
+    # from .io.pick import pick_info
+    # from .cov import prepare_noise_cov
+    # info_nb = pick_info(info, picks)
+    # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
+    # nzero = (cov['eig'] > 0)
+    # n_chan = len(info_nb['ch_names'])
+    # whitener = np.zeros((n_chan, n_chan), dtype=np.float)
+    # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
+    # whitener = np.dot(whitener, cov['eigvec'])
+
+    whitener = _get_whitener_data(info, cov, picks, verbose=False)
+
+    # Proceed to computing the fits (make_guess_data)
+    logger.info('\n---- Computing the forward solution for the guesses...')
+    guess_src = _make_guesses(inner_skull, r0_mri,
+                              guess_grid, guess_exclude, guess_mindist,
+                              n_jobs=n_jobs)[0]
+    if isinstance(inner_skull, dict):
+        transform_surface_to(inner_skull, 'head', mri_head_t)
+    transform_surface_to(guess_src, 'head', mri_head_t)
+
+    # C code computes guesses using a sphere model for speed, don't bother here
+    logger.info('Go through all guess source locations...')
+    fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
+                    ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
+                    inner_skull=inner_skull)
+    _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
+                            verbose=False)
+    guess_fwd = _dipole_forwards(fwd_data, whitener, guess_src['rr'],
+                                 n_jobs=n_jobs)[0]
+    # decompose ahead of time
+    guess_fwd_svd = [linalg.svd(fwd, overwrite_a=True, full_matrices=False)
+                     for fwd in np.array_split(guess_fwd,
+                                               len(guess_src['rr']))]
+    del guess_fwd  # destroyed
+    logger.info('[done %d sources]' % guess_src['nuse'])
+
+    # Do actual fits
+    data = data[picks]
+    ch_names = [info['ch_names'][p] for p in picks]
+    proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
+    out = _fit_dipoles(min_dist_to_inner_skull, data, times, guess_src['rr'],
+                       guess_fwd_svd, fwd_data,
+                       whitener, proj_op, n_jobs)
+    dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
+    residual = out[4]
+
+    logger.info('%d dipoles fitted' % len(dipoles.times))
+    return dipoles, residual
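An end-to-end sketch of the new fitter (all file names hypothetical; trans
can be omitted only for sphere models)::

    import mne

    evoked = mne.read_evokeds('meas-ave.fif', condition=0)  # hypothetical
    evoked.crop(0.08, 0.09)                                 # short window
    dip, residual = mne.fit_dipole(evoked, 'meas-cov.fif',
                                   'subj-bem-sol.fif', 'subj-trans.fif')
    print(dip)
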
diff --git a/mne/epochs.py b/mne/epochs.py
index bac78b2..305fabf 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -8,69 +8,224 @@
 #
 # License: BSD (3-clause)
 
-from .externals.six import string_types
-
-import copy as cp
+from copy import deepcopy
 import warnings
 import json
+import inspect
+import os.path as op
+from distutils.version import LooseVersion
 
 import numpy as np
+import scipy
 
 from .io.write import (start_file, start_block, end_file, end_block,
                        write_int, write_float_matrix, write_float,
-                       write_id, write_string)
+                       write_id, write_string, _get_split_size)
 from .io.meas_info import read_meas_info, write_meas_info, _merge_info
-from .io.open import fiff_open
+from .io.open import fiff_open, _get_next_fname
 from .io.tree import dir_tree_find
-from .io.tag import read_tag
+from .io.tag import read_tag, read_tag_info
 from .io.constants import FIFF
 from .io.pick import (pick_types, channel_indices_by_type, channel_type,
                       pick_channels, pick_info)
-from .io.proj import setup_proj, ProjMixin
-from .io.base import _BaseRaw, _time_as_index, _index_as_time
-from .evoked import EvokedArray, aspect_rev
+from .io.proj import setup_proj, ProjMixin, _proj_equal
+from .io.base import _BaseRaw, ToDataFrameMixin
+from .evoked import EvokedArray, _aspect_rev
 from .baseline import rescale
-from .utils import (check_random_state, _check_pandas_index_arguments,
-                    _check_pandas_installed, object_hash)
-from .channels import ContainsMixin, PickDropChannelsMixin
-from .filter import resample, detrend
+from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                SetChannelsMixin, InterpolationMixin)
+from .filter import resample, detrend, FilterMixin
 from .event import _read_events_fif
 from .fixes import in1d
-from .viz import _mutable_defaults, plot_epochs, _drop_log_stats
-from .utils import check_fname, logger, verbose
-from .externals import six
+from .viz import (plot_epochs, _drop_log_stats,
+                  plot_epochs_psd, plot_epochs_psd_topomap)
+from .utils import (check_fname, logger, verbose, _check_type_picks,
+                    _time_mask, check_random_state, object_hash)
+from .externals.six import iteritems, string_types
 from .externals.six.moves import zip
-from .utils import deprecated, _check_type_picks
 
 
-class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
+def _save_split(epochs, fname, part_idx, n_parts):
+    """Split epochs"""
+
+    # insert index in filename
+    path, base = op.split(fname)
+    idx = base.find('.')
+    if part_idx > 0:
+        fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
+                                            base[idx + 1:]))
+
+    next_fname = None
+    if part_idx < n_parts - 1:
+        next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
+                                                 base[idx + 1:]))
+        next_idx = part_idx + 1
+
+    fid = start_file(fname)
+
+    info = epochs.info
+    meas_id = info['meas_id']
+
+    start_block(fid, FIFF.FIFFB_MEAS)
+    write_id(fid, FIFF.FIFF_BLOCK_ID)
+    if info['meas_id'] is not None:
+        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+
+    # Write measurement info
+    write_meas_info(fid, info)
+
+    # One or more evoked data sets
+    start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    start_block(fid, FIFF.FIFFB_EPOCHS)
+
+    # write events out after getting data to ensure bad events are dropped
+    data = epochs.get_data()
+    start_block(fid, FIFF.FIFFB_MNE_EVENTS)
+    write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
+    mapping_ = ';'.join([k + ':' + str(v) for k, v in
+                         epochs.event_id.items()])
+    write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
+    end_block(fid, FIFF.FIFFB_MNE_EVENTS)
+
+    # First and last sample
+    first = int(epochs.times[0] * info['sfreq'])
+    last = first + len(epochs.times) - 1
+    write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
+    write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
+
+    # save baseline
+    if epochs.baseline is not None:
+        bmin, bmax = epochs.baseline
+        bmin = epochs.times[0] if bmin is None else bmin
+        bmax = epochs.times[-1] if bmax is None else bmax
+        write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
+        write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
+
+    # The epochs itself
+    decal = np.empty(info['nchan'])
+    for k in range(info['nchan']):
+        decal[k] = 1.0 / (info['chs'][k]['cal'] *
+                          info['chs'][k].get('scale', 1.0))
+
+    data *= decal[np.newaxis, :, np.newaxis]
+
+    write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
+
+    # undo modifications to data
+    data /= decal[np.newaxis, :, np.newaxis]
+
+    write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
+                 json.dumps(epochs.drop_log))
+
+    write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
+              epochs.selection)
+
+    # And now write the next file info in case epochs are split on disk
+    if next_fname is not None and n_parts > 1:
+        start_block(fid, FIFF.FIFFB_REF)
+        write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
+        write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
+        if meas_id is not None:
+            write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
+        write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
+        end_block(fid, FIFF.FIFFB_REF)
+
+    end_block(fid, FIFF.FIFFB_EPOCHS)
+    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
+
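
For reference, the part-index naming implemented above behaves as
follows (a sketch assuming the first '.' in the base name starts the
extension):

    import os.path as op

    def split_name(fname, part_idx):
        path, base = op.split(fname)
        idx = base.find('.')
        if part_idx == 0:
            return fname
        return op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
                                           base[idx + 1:]))

    split_name('sub01-epo.fif', 0)  # -> 'sub01-epo.fif'
    split_name('sub01-epo.fif', 1)  # -> 'sub01-epo-1.fif'
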
+
+class _BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+                  SetChannelsMixin, InterpolationMixin, FilterMixin,
+                  ToDataFrameMixin):
     """Abstract base class for Epochs-type classes
 
     This class provides basic functionality and should never be instantiated
     directly. See Epochs below for an explanation of the parameters.
     """
-    def __init__(self, info, event_id, tmin, tmax, baseline=(None, 0),
+    def __init__(self, info, data, events, event_id, tmin, tmax,
+                 baseline=(None, 0), raw=None,
                  picks=None, name='Unknown', reject=None, flat=None,
                  decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
-                 add_eeg_ref=True, verbose=None):
+                 add_eeg_ref=True, proj=True, on_missing='error',
+                 preload_at_end=False, selection=None, drop_log=None,
+                 verbose=None):
 
         self.verbose = verbose
         self.name = name
 
-        if isinstance(event_id, dict):
-            if not all([isinstance(v, int) for v in event_id.values()]):
+        if on_missing not in ['error', 'warning', 'ignore']:
+            raise ValueError('on_missing must be one of: error, '
+                             'warning, ignore. Got: %s' % on_missing)
+
+        # check out event_id dict
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+        elif isinstance(event_id, dict):
+            if not all(isinstance(v, int) for v in event_id.values()):
                 raise ValueError('Event IDs must be of type integer')
-            if not all([isinstance(k, string_types) for k in event_id]):
+            if not all(isinstance(k, string_types) for k in event_id):
                 raise ValueError('Event names must be of type str')
-            self.event_id = event_id
         elif isinstance(event_id, list):
-            if not all([isinstance(v, int) for v in event_id]):
+            if not all(isinstance(v, int) for v in event_id):
                 raise ValueError('Event IDs must be of type integer')
-            self.event_id = dict(zip((str(i) for i in event_id), event_id))
+            event_id = dict(zip((str(i) for i in event_id), event_id))
         elif isinstance(event_id, int):
-            self.event_id = {str(event_id): event_id}
+            event_id = {str(event_id): event_id}
         else:
             raise ValueError('event_id must be dict or int.')
+        self.event_id = event_id
+        del event_id
+
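
All four accepted forms of event_id end up as a str -> int dict; a
small sketch of the event_id=None branch using the same expression as
above:

    import numpy as np

    events = np.array([[10, 0, 1], [20, 0, 2], [30, 0, 1]])
    event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
    # {'1': 1, '2': 2}; likewise 5 -> {'5': 5}, [1, 2] -> {'1': 1, '2': 2}
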
+        if events is not None:  # RtEpochs can have events=None
+
+            if events.dtype.kind not in ['i', 'u']:
+                raise ValueError('events must be an array of type int')
+            if events.ndim != 2 or events.shape[1] != 3:
+                raise ValueError('events must be 2D with 3 columns')
+
+            for key, val in self.event_id.items():
+                if val not in events[:, 2]:
+                    msg = ('No matching events found for %s '
+                           '(event id %i)' % (key, val))
+                    if on_missing == 'error':
+                        raise ValueError(msg)
+                    elif on_missing == 'warning':
+                        logger.warning(msg)
+                        warnings.warn(msg)
+                    else:  # on_missing == 'ignore':
+                        pass
+
+            values = list(self.event_id.values())
+            selected = in1d(events[:, 2], values)
+            if selection is None:
+                self.selection = np.where(selected)[0]
+            else:
+                self.selection = selection
+            if drop_log is None:
+                self.drop_log = [list() if k in self.selection else ['IGNORED']
+                                 for k in range(len(events))]
+            else:
+                self.drop_log = drop_log
+            events = events[selected]
+            n_events = len(events)
+            if n_events > 1:
+                if np.diff(events.astype(np.int64)[:, 0]).min() <= 0:
+                    warnings.warn('The events passed to the Epochs '
+                                  'constructor are not chronologically '
+                                  'ordered.', RuntimeWarning)
+
+            if n_events > 0:
+                logger.info('%d matching events found' % n_events)
+            else:
+                raise ValueError('No desired events found.')
+            self.events = events
+            del events
+        else:
+            self.drop_log = list()
+            self.selection = np.array([], int)
+            # do not set self.events here, let subclass do it
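
A sketch of the bookkeeping in the branch above: rows whose trigger
value is not in event_id are excluded from .events but remembered in
.drop_log as 'IGNORED' (np.in1d stands in for the in1d fix imported
above):

    import numpy as np

    events = np.array([[10, 0, 1], [20, 0, 9], [30, 0, 1]])
    selected = np.in1d(events[:, 2], [1])
    selection = np.where(selected)[0]          # array([0, 2])
    drop_log = [list() if k in selection else ['IGNORED']
                for k in range(len(events))]   # [[], ['IGNORED'], []]
    events = events[selected]
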
 
         # check reject_tmin and reject_tmax
         if (reject_tmin is not None) and (reject_tmin < tmin):
@@ -80,7 +235,7 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if (reject_tmin is not None) and (reject_tmax is not None):
             if reject_tmin >= reject_tmax:
                 raise ValueError('reject_tmin needs to be < reject_tmax')
-        if not detrend in [None, 0, 1]:
+        if detrend not in [None, 0, 1]:
             raise ValueError('detrend must be None, 0, or 1')
 
         # check that baseline is in available data
@@ -97,82 +252,242 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                     err = ("Baseline interval (tmax = %s) is outside of epoch "
                            "data (tmax = %s)" % (baseline_tmax, tmax))
                     raise ValueError(err)
+        if tmin > tmax:
+            raise ValueError('tmin has to be less than or equal to tmax')
 
         self.tmin = tmin
         self.tmax = tmax
         self.baseline = baseline
-        self.reject = reject
         self.reject_tmin = reject_tmin
         self.reject_tmax = reject_tmax
-        self.flat = flat
-        self.decim = decim = int(decim)
-        self._bad_dropped = False
-        self.drop_log = None
-        self.selection = None
         self.detrend = detrend
-
-        # Handle measurement info
+        self._raw = raw
         self.info = info
+        del info
+
         if picks is None:
             picks = list(range(len(self.info['ch_names'])))
         else:
-            self.info['chs'] = [self.info['chs'][k] for k in picks]
-            self.info['ch_names'] = [self.info['ch_names'][k] for k in picks]
-            self.info['nchan'] = len(picks)
+            self.info = pick_info(self.info, picks)
         self.picks = _check_type_picks(picks)
-
         if len(picks) == 0:
             raise ValueError("Picks cannot be empty.")
 
+        if data is None:
+            self.preload = False
+            self._data = None
+        else:
+            assert decim == 1
+            if data.ndim != 3 or data.shape[2] != \
+                    round((tmax - tmin) * self.info['sfreq']) + 1:
+                raise RuntimeError('bad data shape')
+            self.preload = True
+            self._data = data
+        self._offset = None
+
         # Handle times
-        if tmin >= tmax:
-            raise ValueError('tmin has to be smaller than tmax')
         sfreq = float(self.info['sfreq'])
-        n_times_min = int(round(tmin * sfreq))
-        n_times_max = int(round(tmax * sfreq))
-        times = np.arange(n_times_min, n_times_max + 1, dtype=np.float) / sfreq
-        self.times = times
-        self._raw_times = times  # times before decimation
-        self._epoch_stop = ep_len = len(self.times)
-        if decim > 1:
-            new_sfreq = sfreq / decim
-            lowpass = self.info['lowpass']
-            if new_sfreq < 2.5 * lowpass:  # nyquist says 2 but 2.5 is safer
-                msg = ('The measurement information indicates a low-pass '
-                       'frequency of %g Hz. The decim=%i parameter will '
-                       'result in a sampling frequency of %g Hz, which can '
-                       'cause aliasing artifacts.'
-                       % (lowpass, decim, new_sfreq))
-                warnings.warn(msg)
-
-            i_start = n_times_min % decim
-            self._decim_idx = slice(i_start, ep_len, decim)
-            self.times = self.times[self._decim_idx]
-            self.info['sfreq'] = new_sfreq
-
-        self.preload = False
-        self._data = None
-        self._offset = None
+        start_idx = int(round(self.tmin * sfreq))
+        self._raw_times = np.arange(start_idx,
+                                    int(round(self.tmax * sfreq)) + 1) / sfreq
+        self._decim = 1
+        # this method sets the self.times property
+        self.decimate(decim)
 
         # setup epoch rejection
-        self._reject_setup()
+        self.reject = None
+        self.flat = None
+        self._reject_setup(reject, flat)
 
-    def _reject_setup(self):
-        """Sets self._reject_time and self._channel_type_idx (called from
-        __init__)
+        # do the rest
+        valid_proj = [True, 'delayed', False]
+        if proj not in valid_proj:
+            raise ValueError('"proj" must be one of %s, not %s'
+                             % (valid_proj, proj))
+        if proj == 'delayed':
+            self._do_delayed_proj = True
+            logger.info('Entering delayed SSP mode.')
+        else:
+            self._do_delayed_proj = False
+
+        activate = False if self._do_delayed_proj else proj
+        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
+                                                activate=activate)
+
+        if preload_at_end:
+            assert self._data is None
+            assert self.preload is False
+            self.load_data()
+
+    def load_data(self):
+        """Load the data if not already preloaded
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The epochs object.
+
+        Notes
+        -----
+        This function operates in-place.
+
+        .. versionadded:: 0.10.0
         """
-        if self.reject is None and self.flat is None:
+        if self.preload:
             return
+        self._data = self._get_data()
+        self.preload = True
+        self._decim_slice = slice(None, None, None)
+        self._decim = 1
+        self._raw_times = self.times
+        assert self._data.shape[-1] == len(self.times)
+        return self
+
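
Typical use is lazy construction followed by an explicit load; a usage
sketch (raw, events, tmin and tmax are assumed to exist):

    # epochs = mne.Epochs(raw, events, event_id=1, tmin=tmin, tmax=tmax,
    #                     preload=False)
    # epochs.load_data()  # one pass over the raw file; preload is now True
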
+    def decimate(self, decim, copy=False):
+        """Decimate the epochs
+
+        Parameters
+        ----------
+        decim : int
+            The amount to decimate data.
+        copy : bool
+            If True, operate on and return a copy of the Epochs object.
 
+        Returns
+        -------
+        epochs : instance of Epochs
+            The decimated Epochs object.
+
+        Notes
+        -----
+        Decimation can be done multiple times. For example,
+        ``epochs.decimate(2).decimate(2)`` will be the same as
+        ``epochs.decimate(4)``.
+
+        .. versionadded:: 0.10.0
+        """
+        if decim < 1 or decim != int(decim):
+            raise ValueError('decim must be an integer > 0')
+        decim = int(decim)
+        epochs = self.copy() if copy else self
+        del self
+
+        new_sfreq = epochs.info['sfreq'] / float(decim)
+        lowpass = epochs.info['lowpass']
+        if decim > 1 and lowpass is None:
+            warnings.warn('The measurement information indicates data is not '
+                          'low-pass filtered. The decim=%i parameter will '
+                          'result in a sampling frequency of %g Hz, which can '
+                          'cause aliasing artifacts.'
+                          % (decim, new_sfreq))
+        elif decim > 1 and new_sfreq < 2.5 * lowpass:
+            warnings.warn('The measurement information indicates a low-pass '
+                          'frequency of %g Hz. The decim=%i parameter will '
+                          'result in a sampling frequency of %g Hz, which can '
+                          'cause aliasing artifacts.'
+                          % (lowpass, decim, new_sfreq))  # > 50% nyquist limit
+
+        epochs._decim *= decim
+        start_idx = int(round(epochs._raw_times[0] * (epochs.info['sfreq'] *
+                                                      epochs._decim)))
+        i_start = start_idx % epochs._decim
+        decim_slice = slice(i_start, len(epochs._raw_times), epochs._decim)
+        epochs.info['sfreq'] = new_sfreq
+        if epochs.preload:
+            epochs._data = epochs._data[:, :, decim_slice].copy()
+            epochs._raw_times = epochs._raw_times[decim_slice].copy()
+            epochs._decim_slice = slice(None, None, None)
+            epochs._decim = 1
+            epochs.times = epochs._raw_times
+        else:
+            epochs._decim_slice = decim_slice
+            epochs.times = epochs._raw_times[epochs._decim_slice]
+        return epochs
+
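
The aliasing guard above in numbers: no warning is emitted as long as
the decimated rate stays at or above 2.5x the low-pass edge (a safety
margin over the Nyquist factor of 2); the values here are illustrative:

    sfreq, lowpass, decim = 600.0, 40.0, 4
    new_sfreq = sfreq / decim          # 150.0 Hz
    assert new_sfreq >= 2.5 * lowpass  # 150 >= 100, so no warning
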
+    @verbose
+    def apply_baseline(self, baseline, verbose=None):
+        """Baseline correct epochs
+
+        Parameters
+        ----------
+        baseline : tuple of length 2
+            The time interval to apply baseline correction. If baseline is
+            (a, b), the interval is between "a (s)" and "b (s)". If a is None,
+            the beginning of the data is used; if b is None, b is set to the
+            end of the interval. If baseline is (None, None), the entire time
+            interval is used.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The baseline-corrected Epochs object.
+
+        Notes
+        -----
+        Baseline correction can be done multiple times.
+
+        .. versionadded:: 0.10.0
+        """
+        if not isinstance(baseline, tuple) or len(baseline) != 2:
+            raise ValueError('`baseline=%s` is an invalid argument.'
+                             % str(baseline))
+
+        data = self._data
+        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
+                           ref_meg=True, eog=True, ecg=True,
+                           emg=True, exclude=[])
+        data[:, picks, :] = rescale(data[:, picks, :], self.times, baseline,
+                                    'mean', copy=False)
+        self.baseline = baseline
+        return self
+
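
What rescale(..., 'mean') amounts to here, as a standalone sketch:
subtract each channel's mean over the baseline window from the whole
epoch:

    import numpy as np

    times = np.linspace(-0.2, 0.5, 8)
    data = np.arange(8.)[np.newaxis, np.newaxis, :]  # 1 epoch, 1 channel
    mask = (times >= -0.2) & (times <= 0.0)
    data -= data[..., mask].mean(axis=-1, keepdims=True)
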
+    def _reject_setup(self, reject, flat):
+        """Sets self._reject_time and self._channel_type_idx"""
         idx = channel_indices_by_type(self.info)
+        for rej, kind in zip((reject, flat), ('reject', 'flat')):
+            if not isinstance(rej, (type(None), dict)):
+                raise TypeError('reject and flat must be dict or None, not %s'
+                                % type(rej))
+            if isinstance(rej, dict):
+                bads = set(rej.keys()) - set(idx.keys())
+                if len(bads) > 0:
+                    raise KeyError('Unknown channel types found in %s: %s'
+                                   % (kind, bads))
+
         for key in idx.keys():
-            if (self.reject is not None and key in self.reject) \
-                    or (self.flat is not None and key in self.flat):
+            if (reject is not None and key in reject) \
+                    or (flat is not None and key in flat):
                 if len(idx[key]) == 0:
                     raise ValueError("No %s channel found. Cannot reject based"
                                      " on %s." % (key.upper(), key.upper()))
-
+            # now check to see if our rejection and flat are getting more
+            # restrictive
+            old_reject = self.reject if self.reject is not None else dict()
+            new_reject = reject if reject is not None else dict()
+            old_flat = self.flat if self.flat is not None else dict()
+            new_flat = flat if flat is not None else dict()
+            bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
+                       '{kind} values must be at least as stringent as '
+                       'previous ones')
+            for key in set(new_reject.keys()).union(old_reject.keys()):
+                old = old_reject.get(key, np.inf)
+                new = new_reject.get(key, np.inf)
+                if new > old:
+                    raise ValueError(bad_msg.format(kind='reject', key=key,
+                                                    new=new, old=old, op='>'))
+            for key in set(new_flat.keys()).union(old_flat.keys()):
+                old = old_flat.get(key, -np.inf)
+                new = new_flat.get(key, -np.inf)
+                if new < old:
+                    raise ValueError(bad_msg.format(kind='flat', key=key,
+                                                    new=new, old=old, op='<'))
+
+        # after validation, set parameters
+        self._bad_dropped = False
         self._channel_type_idx = idx
+        self.reject = reject
+        self.flat = flat
 
         if (self.reject_tmin is None) and (self.reject_tmax is None):
             self._reject_time = None
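
The restriction enforced above for reject, restated as a sketch
(missing keys count as +inf, so dropping a key is also a relaxation and
is refused):

    import numpy as np

    def more_stringent(old, new):
        keys = set(old).union(new)
        return all(new.get(k, np.inf) <= old.get(k, np.inf) for k in keys)

    more_stringent(dict(eeg=40e-6), dict(eeg=20e-6))   # True: allowed
    more_stringent(dict(eeg=40e-6), dict(eeg=100e-6))  # False: ValueError
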
@@ -210,9 +525,14 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                             ignore_chs=self.info['bads'])
 
     @verbose
-    def _preprocess(self, epoch, verbose=None):
-        """ Aux Function
+    def _detrend_offset_decim(self, epoch, verbose=None):
+        """Aux Function: detrend, baseline correct, offset, decim
+
+        Note: operates in place
         """
+        if epoch is None:
+            return None
+
         # Detrend
         if self.detrend is not None:
             picks = pick_types(self.info, meg=True, eeg=True, stim=False,
@@ -231,25 +551,10 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if self._offset is not None:
             epoch += self._offset
 
-        # Decimate
-        if self.decim > 1:
-            epoch = epoch[:, self._decim_idx]
+        # Decimate if necessary (i.e., epoch not preloaded)
+        epoch = epoch[:, self._decim_slice]
         return epoch
 
-    def get_data(self):
-        """Get all epochs as a 3D array
-
-        Returns
-        -------
-        data : array of shape [n_epochs, n_channels, n_times]
-            The epochs data
-        """
-        if self.preload:
-            return self._data
-        else:
-            data = self._get_data_from_disk()
-            return data
-
     def iter_evoked(self):
         """Iterate over Evoked objects with nave=1
         """
@@ -258,7 +563,7 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         while True:
             data, event_id = self.next(True)
             tmin = self.times[0]
-            info = cp.deepcopy(self.info)
+            info = deepcopy(self.info)
 
             yield EvokedArray(data, info, tmin, comment=str(event_id))
 
@@ -336,20 +641,6 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         return self
 
-    def _get_data_from_disk(self, out=True, verbose=None):
-        raise NotImplementedError('_get_data_from_disk() must be implemented '
-                                  'in derived class.')
-
-    def __iter__(self):
-        """To make iteration over epochs easy.
-        """
-        self._current = 0
-        return self
-
-    def next(self, return_event_id=False):
-        raise NotImplementedError('next() must be implemented in derived '
-                                  'class.')
-
     def __next__(self, *args, **kwargs):
         """Wrapper for Py3k"""
         return self.next(*args, **kwargs)
@@ -370,8 +661,8 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         Returns
         -------
-        evoked : Evoked instance
-            The averaged epochs
+        evoked : instance of Evoked
+            The averaged epochs.
         """
 
         return self._compute_mean_or_stderr(picks, 'ave')
@@ -387,8 +678,8 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         Returns
         -------
-        evoked : Evoked instance
-            The standard error over epochs
+        evoked : instance of Evoked
+            The standard error over epochs.
         """
         return self._compute_mean_or_stderr(picks, 'stderr')
 
@@ -399,12 +690,11 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         n_channels = len(self.ch_names)
         n_times = len(self.times)
+
         if self.preload:
             n_events = len(self.events)
-            if not _do_std:
-                data = np.mean(self._data, axis=0)
-            else:
-                data = np.std(self._data, axis=0)
+            fun = np.std if _do_std else np.mean
+            data = fun(self._data, axis=0)
             assert len(self.events) == len(self._data)
         else:
             data = np.zeros((n_channels, n_times))
@@ -421,7 +711,7 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             # convert to stderr if requested, could do in one pass but do in
             # two (slower) in case there are large numbers
             if _do_std:
-                data_mean = cp.copy(data)
+                data_mean = data.copy()
                 data.fill(0.)
                 for e in self:
                     data += (e - data_mean) ** 2
@@ -432,9 +722,9 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         else:
             _aspect_kind = FIFF.FIFFV_ASPECT_STD_ERR
             data /= np.sqrt(n_events)
-        kind = aspect_rev.get(str(_aspect_kind), 'Unknown')
+        kind = _aspect_rev.get(str(_aspect_kind), 'Unknown')
 
-        info = cp.deepcopy(self.info)
+        info = deepcopy(self.info)
         evoked = EvokedArray(data, info, tmin=self.times[0],
                              comment=self.name, nave=n_events, kind=kind,
                              verbose=self.verbose)
@@ -454,9 +744,6 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if len(evoked.info['ch_names']) == 0:
             raise ValueError('No data channel found when averaging.')
 
-        # otherwise the apply_proj will be confused
-        evoked.proj = True if self.proj is True else None
-
         if evoked.nave < 1:
             warnings.warn('evoked object is empty (based on less '
                           'than 1 epoch)', RuntimeWarning)
@@ -465,316 +752,247 @@ class _BaseEpochs(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
     @property
     def ch_names(self):
+        """Channel names"""
         return self.info['ch_names']
 
-    def plot(self, epoch_idx=None, picks=None, scalings=None,
-             title_str='#%003i', show=True, block=False):
-        """ Visualize single trials using Trellis plot.
+    def plot(self, picks=None, scalings=None, show=True, block=False,
+             n_epochs=20, n_channels=20, title=None):
+        """Visualize epochs.
+
+        Bad epochs can be marked with a left click on top of the epoch. Bad
+        channels can be selected by clicking the channel name on the left side
+        of the main axes. Calling this function drops all the selected bad
+        epochs as well as bad epochs marked beforehand with rejection
+        parameters.
 
         Parameters
         ----------
-        epoch_idx : array-like | int | None
-            The epochs to visualize. If None, the frist 20 epochs are shoen.
-            Defaults to None.
         picks : array-like of int | None
             Channels to be included. If None only good data channels are used.
             Defaults to None
         scalings : dict | None
-            Scale factors for the traces. If None, defaults to:
-            `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
-                  emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1,
-                  chpi=1e-4)`
-        title_str : None | str
-            The string formatting to use for axes titles. If None, no titles
-            will be shown. Defaults expand to ``#001, #002, ...``
+            Scale factors for the traces. If None, defaults to
+            ``dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+            emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)``.
         show : bool
             Whether to show the figure or not.
         block : bool
             Whether to halt program execution until the figure is closed.
             Useful for rejecting bad trials on the fly by clicking on a
             sub plot.
+        n_epochs : int
+            The number of epochs per view. Defaults to 20.
+        n_channels : int
+            The number of channels per view. Defaults to 20.
+        title : str | None
+            The title of the window. If None, the epochs name will be
+            displayed. Defaults to None.
 
         Returns
         -------
         fig : Instance of matplotlib.figure.Figure
             The figure.
-        """
-        return plot_epochs(self, epoch_idx=epoch_idx, picks=picks,
-                           scalings=scalings, title_str=title_str,
-                           show=show, block=block)
-
-
-class Epochs(_BaseEpochs):
-    """List of Epochs
-
-    Parameters
-    ----------
-    raw : Raw object
-        An instance of Raw.
-    events : array, of shape [n_events, 3]
-        The events typically returned by the read_events function.
-        If some events don't match the events of interest as specified
-        by event_id, they will be marked as 'IGNORED' in the drop log.
-    event_id : int | list of int | dict | None
-        The id of the event to consider. If dict,
-        the keys can later be used to acces associated events. Example:
-        dict(auditory=1, visual=3). If int, a dict will be created with
-        the id as string. If a list, all events with the IDs specified
-        in the list are used. If None, all events will be used with
-        and a dict is created with string integer names corresponding
-        to the event id integers.
-    tmin : float
-        Start time before event.
-    tmax : float
-        End time after event.
-    name : string
-        Comment that describes the Evoked data created.
-    baseline : None or tuple of length 2 (default (None, 0))
-        The time interval to apply baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal to (None, None) all the time
-        interval is used.
-        The baseline (a, b) includes both endpoints, i.e. all
-        timepoints t such that a <= t <= b.
-    picks : array-like of int | None (default)
-        Indices of channels to include (if None, all channels
-        are used).
-    preload : boolean
-        Load all epochs from disk when creating the object
-        or wait before accessing each epoch (more memory
-        efficient but can be slower).
-    reject : dict
-        Epoch rejection parameters based on peak to peak amplitude.
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-        If reject is None then no rejection is done.
-        Values are float. Example::
-
-            reject = dict(grad=4000e-13, # T / m (gradiometers)
-                          mag=4e-12, # T (magnetometers)
-                          eeg=40e-6, # uV (EEG channels)
-                          eog=250e-6 # uV (EOG channels)
-                          )
-
-    flat : dict
-        Epoch rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
-        If flat is None then no rejection is done.
-    proj : bool | 'delayed'
-        Apply SSP projection vectors. If proj is 'delayed' and reject is not
-        None the single epochs will be projected before the rejection
-        decision, but used in unprojected state if they are kept.
-        This way deciding which projection vectors are good can be postponed
-        to the evoked stage without resulting in lower epoch counts and
-        without producing results different from early SSP application
-        given comparable parameters. Note that in this case baselining,
-        detrending and temporal decimation will be postponed.
-        If proj is False no projections will be applied which is the
-        recommended value if SSPs are not used for cleaning the data.
-    decim : int
-        Factor by which to downsample the data from the raw file upon import.
-        Warning: This simply selects every nth sample, data is not filtered
-        here. If data is not properly filtered, aliasing artifacts may occur.
-    reject_tmin : scalar | None
-        Start of the time window used to reject epochs (with the default None,
-        the window will start with tmin).
-    reject_tmax : scalar | None
-        End of the time window used to reject epochs (with the default None,
-        the window will end with tmax).
-    detrend : int | None
-        If 0 or 1, the data channels (MEG and EEG) will be detrended when
-        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
-        is no detrending. Note that detrending is performed before baseline
-        correction. If no DC offset is preferred (zeroth order detrending),
-        either turn off baseline correction, as this may introduce a DC
-        shift, or set baseline correction to use the entire time interval
-        (will yield equivalent results but be slower).
-    add_eeg_ref : bool
-        If True, an EEG average reference will be added (unless one
-        already exists).
-    on_missing : str
-        What to do if an event id is not found in the recording.
-        Valid keys are 'error' | 'warning' | 'ignore'
-        Default is 'error'. If on_missing is 'warning' it will proceed but
-        warn, if 'ignore' it will proceed silently.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-        Defaults to raw.verbose.
-
-    Attributes
-    ----------
-    info: dict
-        Measurement info.
-    event_id : dict
-        Names of  of conditions corresponding to event_ids.
-    ch_names : list of string
-        List of channels' names.
-    selection : array
-        List of indices of selected events (not dropped or ignored etc.). For
-        example, if the original event array had 4 events and the second event
-        has been dropped, this attribute would be np.array([0, 2, 3]).
-    preload : bool
-        Indicates whether epochs are in memory.
-    drop_log : list of lists
-        A list of the same length as the event array used to initialize the
-        Epochs object. If the i-th original event is still part of the
-        selection, drop_log[i] will be an empty list; otherwise it will be
-        a list of the reasons the event is not longer in the selection, e.g.:
-
-        'IGNORED' if it isn't part of the current subset defined by the user;
-        'NO DATA' or 'TOO SHORT' if epoch didn't contain enough data;
-        names of channels that exceeded the amplitude threshold;
-        'EQUALIZED_COUNTS' (see equalize_event_counts);
-        or user-defined reasons (see drop_epochs).
-    verbose : bool, str, int, or None
-        See above.
-
-    Notes
-    -----
-    For indexing and slicing:
-
-    epochs[idx] : Epochs
-        Return Epochs object with a subset of epochs (supports single
-        index and python-style slicing)
-
-    For subset selection using categorial labels:
-
-    epochs['name'] : Epochs
-        Return Epochs object with a subset of epochs corresponding to an
-        experimental condition as specified by 'name'.
 
-    epochs[['name_1', 'name_2', ... ]] : Epochs
-        Return Epochs object with a subset of epochs corresponding to multiple
-        experimental conditions as specified by 'name_1', 'name_2', ... .
-
-    See also
-    --------
-    mne.epochs.combine_event_ids
-    mne.Epochs.equalize_event_counts
-    """
-    @verbose
-    def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
-                 picks=None, name='Unknown', preload=False, reject=None,
-                 flat=None, proj=True, decim=1, reject_tmin=None,
-                 reject_tmax=None, detrend=None, add_eeg_ref=True,
-                 on_missing='error', verbose=None):
-        if raw is None:
-            return
-        elif not isinstance(raw, _BaseRaw):
-            raise ValueError('The first argument to `Epochs` must be `None` '
-                             'or an instance of `mne.io.Raw`')
-        if on_missing not in ['error', 'warning', 'ignore']:
-            raise ValueError('on_missing must be one of: error, '
-                             'warning, ignore. Got: %s' % on_missing)
-
-        # prepare for calling the base constructor
-
-        # Handle measurement info
-        info = cp.deepcopy(raw.info)
-        # make sure projs are really copied.
-        info['projs'] = [cp.deepcopy(p) for p in info['projs']]
-
-        if event_id is None:  # convert to int to make typing-checks happy
-            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
-
-        proj = proj or raw.proj  # proj is on when applied in Raw
-
-        # call _BaseEpochs constructor
-        super(Epochs, self).__init__(info, event_id, tmin, tmax,
-                                     baseline=baseline, picks=picks, name=name,
-                                     reject=reject, flat=flat, decim=decim,
-                                     reject_tmin=reject_tmin,
-                                     reject_tmax=reject_tmax, detrend=detrend,
-                                     add_eeg_ref=add_eeg_ref, verbose=verbose)
-
-        # do the rest
-        self.raw = raw
-        proj = proj or raw.proj  # proj is on when applied in Raw
-        if proj not in [True, 'delayed', False]:
-            raise ValueError(r"'proj' must either be 'True', 'False' or "
-                             "'delayed'")
-        self.proj = proj
-        if self._check_delayed():
-            logger.info('Entering delayed SSP mode.')
-
-        activate = False if self._check_delayed() else self.proj
-        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
-                                                activate=activate)
-
-        for key, val in self.event_id.items():
-            if val not in events[:, 2]:
-                msg = ('No matching events found for %s '
-                       '(event id %i)' % (key, val))
-                if on_missing == 'error':
-                    raise ValueError(msg)
-                elif on_missing == 'warning':
-                    logger.warning(msg)
-                    warnings.warn(msg)
-                else:  # on_missing == 'ignore':
-                    pass
-
-        # Select the desired events
-        values = list(self.event_id.values())
-        selected = in1d(events[:, 2], values)
-        self.events = events[selected]
-
-        n_events = len(self.events)
-        if n_events > 1:
-            if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
-                warnings.warn('The events passed to the Epochs constructor '
-                              'are not chronologically ordered.',
-                              RuntimeWarning)
-
-        if n_events > 0:
-            logger.info('%d matching events found' % n_events)
-        else:
-            raise ValueError('No desired events found.')
-
-        self.selection = np.where(selected)[0]
-        self.drop_log = []
-        for k in range(len(events)):
-            if events[k, 2] in values:
-                self.drop_log.append([])
-            else:
-                self.drop_log.append(['IGNORED'])
+        Notes
+        -----
+        The arrow keys (up/down/left/right) can be used to navigate between
+        channels and epochs, and the scaling can be adjusted with the - and +
+        (or =) keys, but this depends on the backend matplotlib is configured
+        to use (e.g., mpl.use(``TkAgg``) should work). Full screen mode can
+        be toggled with the f11 key. The number of epochs and channels per
+        view can be adjusted with the home/end and page down/page up keys.
+        The butterfly plot can be toggled with the ``b`` key. A right mouse
+        click adds a vertical line to the plot.
+
+        .. versionadded:: 0.10.0
+        """
+        return plot_epochs(self, picks=picks, scalings=scalings,
+                           n_epochs=n_epochs, n_channels=n_channels,
+                           title=title, show=show, block=block)
 
-        self.preload = preload
-        if self.preload:
-            self._data = self._get_data_from_disk()
-            self.raw = None
-        else:
-            self._data = None
+    def plot_psd(self, fmin=0, fmax=np.inf, proj=False, n_fft=256,
+                 picks=None, ax=None, color='black', area_mode='std',
+                 area_alpha=0.33, n_overlap=0, dB=True,
+                 n_jobs=1, verbose=None, show=True):
+        """Plot the power spectral density across epochs
 
-    @deprecated('drop_picks will be removed in v0.9. Use drop_channels.')
-    def drop_picks(self, bad_picks):
-        """Drop some picks
+        Parameters
+        ----------
+        fmin : float
+            Start frequency to consider.
+        fmax : float
+            End frequency to consider.
+        proj : bool
+            Apply projection.
+        n_fft : int
+            Number of points to use in Welch FFT calculations.
+        picks : array-like of int | None
+            List of channels to use.
+        ax : instance of matplotlib Axes | None
+            Axes to plot into. If None, axes will be created.
+        color : str | tuple
+            A matplotlib-compatible color to use.
+        area_mode : str | None
+            Mode for plotting area. If 'std', the mean +/- 1 STD (across
+            channels) will be plotted. If 'range', the min and max (across
+            channels) will be plotted. Bad channels will be excluded from
+            these calculations. If None, no area will be plotted.
+        area_alpha : float
+            Alpha for the area.
+        n_overlap : int
+            The number of points of overlap between blocks.
+        dB : bool
+            If True, transform data to decibels.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+        show : bool
+            Show figure if True.
 
-        Allows to discard some channels.
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure with the power spectral density of the epochs data.
         """
-        idx = [k for k, p in enumerate(self.picks) if p not in bad_picks]
-        self.picks = self.picks[idx]
+        return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
+                               n_fft=n_fft, picks=picks, ax=ax,
+                               color=color, area_mode=area_mode,
+                               area_alpha=area_alpha,
+                               n_overlap=n_overlap, dB=dB, n_jobs=n_jobs,
+                               verbose=verbose, show=show)
+
+    def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
+                         n_fft=256, ch_type=None,
+                         n_overlap=0, layout=None, cmap='RdBu_r',
+                         agg_fun=None, dB=True, n_jobs=1, normalize=False,
+                         cbar_fmt='%0.3f', outlines='head', show=True,
+                         verbose=None):
+        """Plot the topomap of the power spectral density across epochs
 
-        self.info = pick_info(self.info, idx, copy=False)
-
-        if self._projector is not None:
-            self._projector = self._projector[idx][:, idx]
+        Parameters
+        ----------
+        bands : list of tuple | None
+            The lower and upper frequency and the name for that band. If None,
+            (default) expands to:
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+        vmin : float | callable | None
+            The value specifying the lower bound of the color range.
+            If None, and vmax is None, -vmax is used. Else np.min(data).
+            If callable, the output equals vmin(data).
+        vmax : float | callable | None
+            The value specifying the upper bound of the color range.
+            If None, the maximum absolute value is used. If callable, the
+            output equals vmax(data). Defaults to None.
+        proj : bool
+            Apply projection.
+        n_fft : int
+            Number of points to use in Welch FFT calculations.
+        ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted. If None,
+            defaults to 'mag' if MEG data are present and to 'eeg' if only
+            EEG data are present.
+        n_overlap : int
+            The number of points of overlap between blocks.
+        layout : None | Layout
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout
+            file is inferred from the data; if no appropriate layout file was
+            found, the layout is automatically generated from the sensor
+            locations.
+        cmap : matplotlib colormap
+            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+            'Reds'.
+        agg_fun : callable
+            The function used to aggregate over frequencies.
+            Defaults to np.sum if normalize is True, else np.mean.
+        dB : bool
+            If True, transform data to decibels (with ``10 * np.log10(data)``)
+            following the application of `agg_fun`. Only valid if normalize
+            is False.
+        n_jobs : int
+            Number of jobs to run in parallel.
+        normalize : bool
+            If True, each band will be divided by the total power. Defaults to
+            False.
+        cbar_fmt : str
+            The colorbar format. Defaults to '%0.3f'.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        show : bool
+            Show figure if True.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
 
-        if self.preload:
-            self._data = self._data[:, idx, :]
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure distributing one image per channel across sensor topography.
+        """
+        return plot_epochs_psd_topomap(
+            self, bands=bands, vmin=vmin, vmax=vmax, proj=proj, n_fft=n_fft,
+            ch_type=ch_type, n_overlap=n_overlap, layout=layout, cmap=cmap,
+            agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
+            cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=verbose)
 
-    def drop_bad_epochs(self):
+    def drop_bad_epochs(self, reject='existing', flat='existing'):
         """Drop bad epochs without retaining the epochs data.
 
         Should be used before slicing operations.
 
         .. Warning:: Operation is slow since all epochs have to be read from
-            disk. To avoid reading epochs form disk multiple times, initialize
+            disk. To avoid reading epochs from disk multiple times, initialize
             Epochs object with preload=True.
 
+        Parameters
+        ----------
+        reject : dict | str | None
+            Rejection parameters based on peak-to-peak amplitude.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+            If reject is None then no rejection is done. If 'existing',
+            then the rejection parameters set at instantiation are used.
+        flat : dict | str | None
+            Rejection parameters based on flatness of signal.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+            are floats that set the minimum acceptable peak-to-peak amplitude.
+            If flat is None then no rejection is done. If 'existing',
+            then the flat parameters set at instantiation are used.
+
+        Notes
+        -----
+        Dropping bad epochs can be done multiple times with different
+        ``reject`` and ``flat`` parameters. However, once an epoch is
+        dropped, it is dropped forever, so if more lenient thresholds might
+        be needed later, operate on a copy made with ``epochs.copy()``.
         """
-        self._get_data_from_disk(out=False)
+        if reject == 'existing':
+            if flat == 'existing' and self._bad_dropped:
+                return
+            reject = self.reject
+        if flat == 'existing':
+            flat = self.flat
+        if any(isinstance(rej, string_types) and rej != 'existing' for
+               rej in (reject, flat)):
+            raise ValueError('reject and flat, if strings, must be "existing"')
+        self._reject_setup(reject, flat)
+        self._get_data(out=False)
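
A usage sketch (thresholds are illustrative): repeated calls may only
tighten the criteria, and dropped epochs never come back:

    # epochs.drop_bad_epochs(reject=dict(eeg=80e-6))
    # epochs.drop_bad_epochs(reject=dict(eeg=40e-6))   # stricter: allowed
    # epochs.drop_bad_epochs(reject=dict(eeg=120e-6))  # looser: ValueError
    # to keep looser thresholds as an option, start from epochs.copy()
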
 
     def drop_log_stats(self, ignore=['IGNORED']):
         """Compute the channel stats based on a drop_log from Epochs.
@@ -788,12 +1006,16 @@ class Epochs(_BaseEpochs):
         -------
         perc : float
             Total percentage of epochs dropped.
+
+        See Also
+        --------
+        plot_drop_log
         """
         return _drop_log_stats(self.drop_log, ignore)
 
     def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
                       color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
-                      show=True, return_fig=False):
+                      show=True):
         """Show the channel stats based on a drop_log from Epochs
 
         Parameters
@@ -813,9 +1035,6 @@ class Epochs(_BaseEpochs):
             The drop reasons to ignore.
         show : bool
             Show figure if True.
-        return_fig : bool
-            Return only figure handle if True. This argument will default
-            to True in v0.9 and then be removed.
 
         Returns
         -------
@@ -825,26 +1044,14 @@ class Epochs(_BaseEpochs):
             The figure.
         """
         if not self._bad_dropped:
-            print("Bad epochs have not yet been dropped.")
-            return
+            raise ValueError("You cannot use plot_drop_log since bad "
+                             "epochs have not yet been dropped. "
+                             "Use epochs.drop_bad_epochs().")
 
         from .viz import plot_drop_log
         return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
                              color=color, width=width, ignore=ignore,
-                             show=show, return_fig=return_fig)
-
-    def _check_delayed(self):
-        """ Aux method
-        """
-        is_delayed = False
-        if self.proj == 'delayed':
-            if self.reject is None:
-                raise RuntimeError('The delayed SSP mode was requested '
-                                   'but no rejection parameters are present. '
-                                   'Please add rejection parameters before '
-                                   'using this option.')
-            is_delayed = True
-        return is_delayed
+                             show=show)
 
     @verbose
     def drop_epochs(self, indices, reason='USER', verbose=None):
@@ -894,58 +1101,23 @@ class Epochs(_BaseEpochs):
         count = len(indices)
         logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
 
-    @verbose
-    def _get_epoch_from_disk(self, idx, proj, verbose=None):
-        """Load one epoch from disk"""
-        if self.raw is None:
-            # This should never happen, as raw=None only if preload=True
-            raise ValueError('An error has occurred, no valid raw file found.'
-                             ' Please report this to the mne-python '
-                             'developers.')
-        sfreq = self.raw.info['sfreq']
-
-        if self.events.ndim == 1:
-            # single event
-            event_samp = self.events[0]
-        else:
-            event_samp = self.events[idx, 0]
-
-        # Read a data segment
-        first_samp = self.raw.first_samp
-        start = int(round(event_samp + self.tmin * sfreq)) - first_samp
-        stop = start + self._epoch_stop
-        if start < 0:
-            return None, None
-
-        epoch_raw, _ = self.raw[self.picks, start:stop]
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Method to get a given epoch from disk"""
+        raise NotImplementedError
 
-        # setup list of epochs to handle delayed SSP
-        epochs = []
+    def _project_epoch(self, epoch):
+        """Helper to process a raw epoch based on the delayed param"""
         # whenever requested, the first epoch is being projected.
+        if epoch is None:  # can happen if t < 0
+            return None
+        proj = self._do_delayed_proj or self.proj
         if self._projector is not None and proj is True:
-            epochs += [np.dot(self._projector, epoch_raw)]
-        else:
-            epochs += [epoch_raw]
-
-        # in case the proj passed is True but self proj is not we
-        # have delayed SSP
-        if self.proj != proj:  # so append another unprojected epoch
-            epochs += [epoch_raw.copy()]
-
-        # only preprocess first candidate, to make delayed SSP working
-        # we need to postpone the preprocessing since projection comes
-        # first.
-        epochs[0] = self._preprocess(epochs[0])
-
-        # return a second None if nothing is projected
-        if len(epochs) == 1:
-            epochs += [None]
-
-        return epochs
+            epoch = np.dot(self._projector, epoch)
+        return epoch
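
A sketch of the delayed-SSP idea this helper supports: the projected
copy is only used for the rejection decision, while the stored epoch
stays unprojected until apply_proj() is called at the evoked stage:

    import numpy as np

    projector = np.eye(3) - np.ones((3, 3)) / 3.  # e.g. average reference
    epoch = np.random.randn(3, 100)
    epoch_for_rejection = np.dot(projector, epoch)
    epoch_stored = epoch  # kept raw; projection can be decided later
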
 
     @verbose
-    def _get_data_from_disk(self, out=True, verbose=None):
-        """Load all data from disk
+    def _get_data(self, out=True, verbose=None):
+        """Load all data, dropping bad epochs along the way
 
         Parameters
         ----------
@@ -957,140 +1129,104 @@ class Epochs(_BaseEpochs):
             Defaults to self.verbose.
         """
         n_events = len(self.events)
-        data = np.array([])
+        # in case there are no good events
+        if self.preload:
+            # we will store our result in our existing array
+            data = self._data
+        else:
+            # we start out with an empty array, allocate only if necessary
+            data = np.empty((0, len(self.info['ch_names']), len(self.times)))
         if self._bad_dropped:
-            proj = False if self._check_delayed() else self.proj
             if not out:
                 return
-            for ii in range(n_events):
+            if self.preload:
+                return data
+
+            # we need to load from disk, drop, and return data
+            for idx in range(n_events):
                 # faster to pre-allocate memory here
-                epoch, epoch_raw = self._get_epoch_from_disk(ii, proj=proj)
-                if ii == 0:
-                    data = np.empty((n_events, epoch.shape[0],
-                                     epoch.shape[1]), dtype=epoch.dtype)
-                if self._check_delayed():
-                    epoch = epoch_raw
-                data[ii] = epoch
+                epoch_noproj = self._get_epoch_from_raw(idx)
+                epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                if self._do_delayed_proj:
+                    epoch_out = epoch_noproj
+                else:
+                    epoch_out = self._project_epoch(epoch_noproj)
+                if idx == 0:
+                    data = np.empty((n_events, len(self.ch_names),
+                                     len(self.times)), dtype=epoch_out.dtype)
+                data[idx] = epoch_out
         else:
-            proj = True if self._check_delayed() else self.proj
-            good_events = []
+            # bads need to be dropped, this might occur after a preload
+            # e.g., when calling drop_bad_epochs w/new params
+            good_idx = []
             n_out = 0
-            for idx, sel in zip(range(n_events), self.selection):
-                epoch, epoch_raw = self._get_epoch_from_disk(idx, proj=proj)
+            assert n_events == len(self.selection)
+            for idx, sel in enumerate(self.selection):
+                if self.preload:  # from memory
+                    if self._do_delayed_proj:
+                        epoch_noproj = self._data[idx]
+                        epoch = self._project_epoch(epoch_noproj)
+                    else:
+                        epoch_noproj = None
+                        epoch = self._data[idx]
+                else:  # from disk
+                    epoch_noproj = self._get_epoch_from_raw(idx)
+                    epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                    epoch = self._project_epoch(epoch_noproj)
+                epoch_out = epoch_noproj if self._do_delayed_proj else epoch
                 is_good, offenders = self._is_good_epoch(epoch)
-                if is_good:
-                    good_events.append(idx)
-                    if self._check_delayed():
-                        epoch = epoch_raw
-                    if out:
-                        # faster to pre-allocate, then trim as necessary
-                        if n_out == 0:
-                            data = np.empty((n_events, epoch.shape[0],
-                                             epoch.shape[1]),
-                                            dtype=epoch.dtype, order='C')
-                        data[n_out] = epoch
-                        n_out += 1
-                else:
+                if not is_good:
                     self.drop_log[sel] += offenders
+                    continue
+                good_idx.append(idx)
+
+                # store the epoch if there is a reason to (output or update)
+                if out or self.preload:
+                    # faster to pre-allocate, then trim as necessary
+                    if n_out == 0 and not self.preload:
+                        data = np.empty((n_events, epoch_out.shape[0],
+                                         epoch_out.shape[1]),
+                                        dtype=epoch_out.dtype, order='C')
+                    data[n_out] = epoch_out
+                    n_out += 1
 
-            self.selection = self.selection[good_events]
-            self.events = np.atleast_2d(self.events[good_events])
             self._bad_dropped = True
-            logger.info("%d bad epochs dropped"
-                        % (n_events - len(good_events)))
-            if not out:
-                return
-            # just take the good events
-            assert len(good_events) == n_out
-            if n_out > 0:
-                # slicing won't free the space, so we resize
-                # we have ensured the C-contiguity of the array in allocation
-                # so this operation will be safe unless np is very broken
-                data.resize((n_out,) + data.shape[1:], refcheck=False)
-        return data
+            logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
 
-    @verbose
-    def _is_good_epoch(self, data, verbose=None):
-        """Determine if epoch is good"""
-        if data is None:
-            return False, ['NO_DATA']
-        n_times = len(self.times)
-        if data.shape[1] < n_times:
-            # epoch is too short ie at the end of the data
-            return False, ['TOO_SHORT']
-        if self.reject is None and self.flat is None:
-            return True, None
-        else:
-            if self._reject_time is not None:
-                data = data[:, self._reject_time]
+            # Now update our properties
+            if len(good_idx) == 0:  # silly fix for old numpy index error
+                self.selection = np.array([], int)
+                self.events = np.empty((0, 3))
+            else:
+                self.selection = self.selection[good_idx]
+                self.events = np.atleast_2d(self.events[good_idx])
 
-            return _is_good(data, self.ch_names, self._channel_type_idx,
-                            self.reject, self.flat, full_report=True,
-                            ignore_chs=self.info['bads'])
+            # adjust the data size if there is a reason to (output or update)
+            if out or self.preload:
+                data.resize((n_out,) + data.shape[1:], refcheck=False)
+
+        return data if out else None
 
     def get_data(self):
         """Get all epochs as a 3D array
 
         Returns
         -------
-        data : array of shape [n_epochs, n_channels, n_times]
-            The epochs data
-        """
-        if self.preload:
-            data_ = self._data
-        else:
-            data_ = self._get_data_from_disk()
-        if self._check_delayed():
-            data = np.zeros_like(data_)
-            for ii, e in enumerate(data_):
-                data[ii] = self._preprocess(e.copy(), self.verbose)
-        else:
-            data = data_
-
-        return data
-
-    def _reject_setup(self):
-        """Sets self._reject_time and self._channel_type_idx (called from
-        __init__)
+        data : array of shape (n_epochs, n_channels, n_times)
+            A copy of the epochs data.
         """
-        if self.reject is None and self.flat is None:
-            return
-
-        idx = channel_indices_by_type(self.info)
-        for key in idx.keys():
-            if (self.reject is not None and key in self.reject) \
-                    or (self.flat is not None and key in self.flat):
-                if len(idx[key]) == 0:
-                    raise ValueError("No %s channel found. Cannot reject based"
-                                     " on %s." % (key.upper(), key.upper()))
-
-        self._channel_type_idx = idx
-
-        if (self.reject_tmin is None) and (self.reject_tmax is None):
-            self._reject_time = None
-        else:
-            if self.reject_tmin is None:
-                reject_imin = None
-            else:
-                idxs = np.nonzero(self.times >= self.reject_tmin)[0]
-                reject_imin = idxs[0]
-            if self.reject_tmax is None:
-                reject_imax = None
-            else:
-                idxs = np.nonzero(self.times <= self.reject_tmax)[0]
-                reject_imax = idxs[-1]
-
-            self._reject_time = slice(reject_imin, reject_imax)
+        return self._get_data()
 
     def __len__(self):
         """Number of epochs.
         """
         if not self._bad_dropped:
-            err = ("Since bad epochs have not been dropped, the length of the "
-                   "Epochs is not known. Load the Epochs with preload=True, "
-                   "or call Epochs.drop_bad_epochs(). To find the number of "
-                   "events in the Epochs, use len(Epochs.events).")
-            raise RuntimeError(err)
+            raise RuntimeError('Since bad epochs have not been dropped, the '
+                               'length of the Epochs is not known. Load the '
+                               'Epochs with preload=True, or call '
+                               'Epochs.drop_bad_epochs(). To find the number '
+                               'of events in the Epochs, use '
+                               'len(Epochs.events).')
         return len(self.events)
 
     def __iter__(self):
@@ -1101,27 +1237,37 @@ class Epochs(_BaseEpochs):
 
     def next(self, return_event_id=False):
         """To make iteration over epochs easy.
+
+        Parameters
+        ----------
+        return_event_id : bool
+            If True, return both an epoch and its event id.
+
+        Returns
+        -------
+        epoch : array of shape (n_channels, n_times)
+            The epoch data.
+        event_id : int
+            The event id. Only returned if ``return_event_id`` is ``True``.
         """
         if self.preload:
             if self._current >= len(self._data):
                 raise StopIteration
             epoch = self._data[self._current]
-            if self._check_delayed():
-                epoch = self._preprocess(epoch.copy(), self.verbose)
             self._current += 1
         else:
-            proj = True if self._check_delayed() else self.proj
             is_good = False
             while not is_good:
                 if self._current >= len(self.events):
                     raise StopIteration
-                epoch, epoch_raw = self._get_epoch_from_disk(self._current,
-                                                             proj=proj)
+                epoch_noproj = self._get_epoch_from_raw(self._current)
+                epoch_noproj = self._detrend_offset_decim(epoch_noproj)
+                epoch = self._project_epoch(epoch_noproj)
                 self._current += 1
                 is_good, _ = self._is_good_epoch(epoch)
             # If delayed-ssp mode, pass 'virgin' data after rejection decision.
-            if self._check_delayed():
-                epoch = self._preprocess(epoch_raw, self.verbose)
+            if self._do_delayed_proj:
+                epoch = epoch_noproj
 
         if not return_event_id:
             return epoch
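
# A usage sketch (illustration only), assuming `epochs` is an existing
# Epochs instance: iteration yields one array of shape (n_channels,
# n_times) per good epoch, after detrending, baseline correction,
# decimation, and (unless delayed) projection have been applied.
for epoch in epochs:
    assert epoch.ndim == 2  # (n_channels, n_times)
epoch, event_id = iter(epochs).next(return_event_id=True)  # first good epoch
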
@@ -1133,10 +1279,8 @@ class Epochs(_BaseEpochs):
     def __repr__(self):
         """ Build string representation
         """
-        if not self._bad_dropped:
-            s = 'n_events : %s (good & bad)' % len(self.events)
-        else:
-            s = 'n_events : %s (all good)' % len(self.events)
+        s = 'n_events : %s ' % len(self.events)
+        s += '(all good)' if self._bad_dropped else '(good & bad)'
         s += ', tmin : %s (s)' % self.tmin
         s += ', tmax : %s (s)' % self.tmax
         s += ', baseline : %s' % str(self.baseline)
@@ -1144,8 +1288,10 @@ class Epochs(_BaseEpochs):
             counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
                       for k, v in sorted(self.event_id.items())]
             s += ',\n %s' % ', '.join(counts)
-
-        return '<Epochs  |  %s>' % s
+        class_name = self.__class__.__name__
+        if class_name == '_BaseEpochs':
+            class_name = 'Epochs'
+        return '<%s  |  %s>' % (class_name, s)
 
     def _key_match(self, key):
         """Helper function for event dict use"""
@@ -1160,15 +1306,26 @@ class Epochs(_BaseEpochs):
         del self._data
         epochs = self.copy()
         self._data, epochs._data = data, data
+        del self
 
         if isinstance(key, string_types):
             key = [key]
 
         if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
+            if any('/' in k_i for k_i in epochs.event_id.keys()):
+                if any(k_e not in epochs.event_id for k_e in key):
+                    # Select a given key if the requested set of
+                    # '/'-separated types are a subset of the types in that key
+                    key = [k for k in epochs.event_id.keys()
+                           if all(set(k_i.split('/')).issubset(k.split('/'))
+                                  for k_i in key)]
+                    if len(key) == 0:
+                        raise KeyError('Attempting selection of events via '
+                                       'multiple/partial matching, but no '
+                                       'event matches all criteria.')
             select = np.any(np.atleast_2d([epochs._key_match(k)
                                            for k in key]), axis=0)
-            epochs.name = ('+'.join(key) if epochs.name == 'Unknown'
-                           else 'epochs_%s' % '+'.join(key))
+            epochs.name = '+'.join(key)
         else:
             select = key if isinstance(key, slice) else np.atleast_1d(key)
 
@@ -1178,12 +1335,12 @@ class Epochs(_BaseEpochs):
         epochs.selection = key_selection
         epochs.events = np.atleast_2d(epochs.events[select])
         if epochs.preload:
-            epochs._data = epochs._data[select]
-
+            # ensure that each Epochs instance owns its own data so we can
+            # resize later if necessary
+            epochs._data = np.require(epochs._data[select], requirements=['O'])
         # update event id to reflect new content of epochs
         epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
                                if v in epochs.events[:, 2])
-
         return epochs
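
# A sketch of the tag-based selection above (illustration only), assuming
# `epochs` was built with event_id={'audio/left': 1, 'audio/right': 2,
# 'visual/left': 3}:
left = epochs['left']                   # 'audio/left' and 'visual/left'
audio_left = epochs[['audio', 'left']]  # keys matching *all* listed tags
first_ten = epochs[:10]                 # plain indexing/slicing also works
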
 
     def crop(self, tmin=None, tmax=None, copy=False):
@@ -1191,9 +1348,9 @@ class Epochs(_BaseEpochs):
 
         Parameters
         ----------
-        tmin : float
+        tmin : float | None
             Start time of selection in seconds.
-        tmax : float
+        tmax : float | None
             End time of selection in seconds.
         copy : bool
             If False epochs is cropped in place.
@@ -1203,11 +1360,12 @@ class Epochs(_BaseEpochs):
         epochs : Epochs instance
             The cropped epochs.
 
-        Note
-        ----
+        Notes
+        -----
         Unlike Python slices, MNE time intervals include both their end points;
         crop(tmin, tmax) returns the interval tmin <= t <= tmax.
         """
+        # XXX this could be made to work on non-preloaded data...
         if not self.preload:
             raise RuntimeError('Modifying data of epochs is only supported '
                                'when preloading is used. Use preload=True '
@@ -1227,19 +1385,20 @@ class Epochs(_BaseEpochs):
                           "tmax is set to epochs.tmax")
             tmax = self.tmax
 
-        tmask = (self.times >= tmin) & (self.times <= tmax)
+        tmask = _time_mask(self.times, tmin, tmax)
         tidx = np.where(tmask)[0]
 
         this_epochs = self if not copy else self.copy()
         this_epochs.tmin = this_epochs.times[tidx[0]]
         this_epochs.tmax = this_epochs.times[tidx[-1]]
         this_epochs.times = this_epochs.times[tmask]
+        this_epochs._raw_times = this_epochs._raw_times[tmask]
         this_epochs._data = this_epochs._data[:, :, tmask]
         return this_epochs
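
# A sketch (illustration only), assuming `epochs` is preloaded: both
# endpoints are kept, i.e. every sample with tmin <= t <= tmax survives.
cropped = epochs.crop(tmin=0., tmax=0.3, copy=True)
print(cropped.times[0], cropped.times[-1])  # ~0.0 ... ~0.3, inclusive
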
 
     @verbose
     def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
-                 verbose=None):
+                 copy=False, verbose=None):
         """Resample preloaded data
 
         Parameters
@@ -1252,37 +1411,49 @@ class Epochs(_BaseEpochs):
             Window to use in resampling. See scipy.signal.resample.
         n_jobs : int
             Number of jobs to run in parallel.
+        copy : bool
+            Whether to operate on a copy of the data (True) or modify data
+            in-place (False). Defaults to False.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
 
+        Returns
+        -------
+        epochs : instance of Epochs
+            The resampled epochs object.
+
         Notes
         -----
         For some data, it may be more accurate to use npad=0 to reduce
         artifacts. This is dataset dependent -- check your data!
         """
-        if self.preload:
-            o_sfreq = self.info['sfreq']
-            self._data = resample(self._data, sfreq, o_sfreq, npad,
-                                  n_jobs=n_jobs)
-            # adjust indirectly affected variables
-            self.info['sfreq'] = sfreq
-            self.times = (np.arange(self._data.shape[2], dtype=np.float)
-                          / sfreq + self.times[0])
-        else:
+        # XXX this could operate on non-preloaded data, too
+        if not self.preload:
             raise RuntimeError('Can only resample preloaded data')
 
+        inst = self.copy() if copy else self
+
+        o_sfreq = inst.info['sfreq']
+        inst._data = resample(inst._data, sfreq, o_sfreq, npad,
+                              n_jobs=n_jobs)
+        # adjust indirectly affected variables
+        inst.info['sfreq'] = sfreq
+        inst.times = (np.arange(inst._data.shape[2], dtype=np.float) /
+                      sfreq + inst.times[0])
+
+        return inst
+
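
# A sketch (illustration only), assuming `epochs` is preloaded: with
# copy=True the original object keeps its sampling rate.
epochs_ds = epochs.resample(100., npad=100, copy=True)
assert epochs_ds.info['sfreq'] == 100.
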
     def copy(self):
         """Return copy of Epochs instance"""
-        raw = self.raw
-        del self.raw
-        new = cp.deepcopy(self)
-        self.raw = raw
-        new.raw = raw
-
+        raw = self._raw
+        del self._raw
+        new = deepcopy(self)
+        self._raw = raw
+        new._raw = raw
         return new
 
-    def save(self, fname):
+    def save(self, fname, split_size='2GB'):
         """Save epochs in a fif file
 
         Parameters
@@ -1290,218 +1461,34 @@ class Epochs(_BaseEpochs):
         fname : str
             The name of the file, which should end with -epo.fif or
             -epo.fif.gz.
-        """
-        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
-
-        # Create the file and save the essentials
-        fid = start_file(fname)
-
-        start_block(fid, FIFF.FIFFB_MEAS)
-        write_id(fid, FIFF.FIFF_BLOCK_ID)
-        if self.info['meas_id'] is not None:
-            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, self.info['meas_id'])
-
-        # Write measurement info
-        write_meas_info(fid, self.info)
-
-        # One or more evoked data sets
-        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
-        start_block(fid, FIFF.FIFFB_EPOCHS)
-
-        # write events out after getting data to ensure bad events are dropped
-        data = self.get_data()
-        start_block(fid, FIFF.FIFFB_MNE_EVENTS)
-        write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, self.events.T)
-        mapping_ = ';'.join([k + ':' + str(v) for k, v in
-                             self.event_id.items()])
-        write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
-        end_block(fid, FIFF.FIFFB_MNE_EVENTS)
-
-        # First and last sample
-        first = int(self.times[0] * self.info['sfreq'])
-        last = first + len(self.times) - 1
-        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
-        write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
-
-        # save baseline
-        if self.baseline is not None:
-            bmin, bmax = self.baseline
-            bmin = self.times[0] if bmin is None else bmin
-            bmax = self.times[-1] if bmax is None else bmax
-            write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
-            write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
-
-        # The epochs itself
-        decal = np.empty(self.info['nchan'])
-        for k in range(self.info['nchan']):
-            decal[k] = 1.0 / (self.info['chs'][k]['cal']
-                              * self.info['chs'][k].get('scale', 1.0))
-
-        data *= decal[np.newaxis, :, np.newaxis]
-
-        write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
-
-        # undo modifications to data
-        data /= decal[np.newaxis, :, np.newaxis]
-
-        write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
-                     json.dumps(self.drop_log))
-
-        write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
-                  self.selection)
-
-        end_block(fid, FIFF.FIFFB_EPOCHS)
-
-        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
-        end_block(fid, FIFF.FIFFB_MEAS)
-        end_file(fid)
-
-    def as_data_frame(self, picks=None, index=None, scale_time=1e3,
-                      scalings=None, copy=True):
-        """Get the epochs as Pandas DataFrame
-
-        Export epochs data in tabular structure with MEG channels as columns
-        and three additional info columns 'epoch', 'condition', and 'time'.
-        The format matches a long table format commonly used to represent
-        repeated measures in within-subject designs.
-
-        Parameters
-        ----------
-        picks : array-like of int | None
-            If None only MEG and EEG channels are kept
-            otherwise the channels indices in picks are kept.
-        index : tuple of str | None
-            Column to be used as index for the data. Valid string options
-            are 'epoch', 'time' and 'condition'. If None, all three info
-            columns will be included in the table as categorial data.
-        scale_time : float
-            Scaling to be applied to time units.
-        scalings : dict | None
-            Scaling to be applied to the channels picked. If None, defaults to
-            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)`.
-        copy : bool
-            If true, data will be copied. Else data may be modified in place.
-
-        Returns
-        -------
-        df : instance of pandas.core.DataFrame
-            Epochs exported into tabular data structure.
-        """
+        split_size : string | int
+            Large epochs files are automatically split into multiple pieces.
+            This parameter specifies the maximum size of each piece. If the
+            parameter is an integer, it specifies the size in bytes. It is
+            also possible to pass a human-readable string, e.g., 100MB.
+            Note: Due to FIFF file limitations, the maximum split size is 2GB.
 
-        pd = _check_pandas_installed()
-
-        default_index = ['condition', 'epoch', 'time']
-        if index is not None:
-            _check_pandas_index_arguments(index, default_index)
-        else:
-            index = default_index
-
-        if picks is None:
-            picks = list(range(self.info['nchan']))
-        else:
-            if not in1d(picks, np.arange(len(self.events))).all():
-                raise ValueError('At least one picked channel is not present '
-                                 'in this eppochs instance.')
-
-        data = self.get_data()[:, picks, :]
-        shape = data.shape
-        data = np.hstack(data).T
-        if copy:
-            data = data.copy()
-
-        types = [channel_type(self.info, idx) for idx in picks]
-        n_channel_types = 0
-        ch_types_used = []
-
-        scalings = _mutable_defaults(('scalings', scalings))[0]
-        for t in scalings.keys():
-            if t in types:
-                n_channel_types += 1
-                ch_types_used.append(t)
-
-        for t in ch_types_used:
-            scaling = scalings[t]
-            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
-            if len(idx) > 0:
-                data[:, idx] *= scaling
-
-        id_swapped = dict((v, k) for k, v in self.event_id.items())
-        names = [id_swapped[k] for k in self.events[:, 2]]
-
-        mindex = list()
-        mindex.append(('condition', np.repeat(names, shape[2])))
-        mindex.append(('time', np.tile(self.times, shape[0]) *
-                      scale_time))  # if 'epoch' in index:
-        mindex.append(('epoch', np.repeat(np.arange(shape[0]),
-                      shape[2])))
-
-        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
-        col_names = [self.ch_names[k] for k in picks]
-
-        df = pd.DataFrame(data, columns=col_names)
-        [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
-        if index is not None:
-            with warnings.catch_warnings(record=True):
-                if 'time' in index:
-                    df['time'] = df['time'].astype(np.int64)
-                df.set_index(index, inplace=True)
-
-        return df
-
-    def to_nitime(self, picks=None, epochs_idx=None, collapse=False,
-                  copy=True, first_samp=0):
-        """ Export epochs as nitime TimeSeries
-
-        Parameters
-        ----------
-        picks : array-like of int | None
-            Indices for exporting subsets of the epochs channels. If None
-            all good channels will be used.
-        epochs_idx : slice | array-like | None
-            Epochs index for single or selective epochs exports. If None, all
-            epochs will be used.
-        collapse : boolean
-            If True export epochs and time slices will be collapsed to 2D
-            array. This may be required by some nitime functions.
-        copy : boolean
-            If True exports copy of epochs data.
-        first_samp : int
-            Number of samples to offset the times by. Use raw.first_samp to
-            have the time returned relative to the session onset, or zero
-            (default) for time relative to the recording onset.
+            .. versionadded:: 0.10.0
 
-        Returns
-        -------
-        epochs_ts : instance of nitime.TimeSeries
-            The Epochs as nitime TimeSeries object.
+        Notes
+        -----
+        Bad epochs will be dropped before saving the epochs to disk.
         """
-        try:
-            from nitime import TimeSeries  # to avoid strong dependency
-        except ImportError:
-            raise Exception('the nitime package is missing')
-
-        if picks is None:
-            picks = pick_types(self.info, include=self.ch_names,
-                               exclude='bads')
-        if epochs_idx is None:
-            epochs_idx = slice(len(self.events))
-
-        data = self.get_data()[epochs_idx, picks]
-
-        if copy is True:
-            data = data.copy()
-
-        if collapse is True:
-            data = np.hstack(data).copy()
+        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+        split_size = _get_split_size(split_size)
 
-        offset = _time_as_index(abs(self.tmin), self.info['sfreq'],
-                                first_samp, True)
-        t0 = _index_as_time(self.events[0, 0] - offset, self.info['sfreq'],
-                            first_samp, True)[0]
-        epochs_ts = TimeSeries(data, sampling_rate=self.info['sfreq'], t0=t0)
-        epochs_ts.ch_names = np.array(self.ch_names)[picks].tolist()
+        # drop bad epochs first so the epoch count is known accurately;
+        # the get_data() call below would drop them anyway
+        self.drop_bad_epochs()
+        total_size = self[0].get_data().nbytes * len(self)
+        n_parts = int(np.ceil(total_size / float(split_size)))
+        epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
 
-        return epochs_ts
+        for part_idx, epoch_idx in enumerate(epoch_idxs):
+            this_epochs = self[epoch_idx] if n_parts > 1 else self
+            # avoid missing event_ids in splits
+            this_epochs.event_id = self.event_id
+            _save_split(this_epochs, fname, part_idx, n_parts)
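
# A sketch (illustration only; the file name is hypothetical): bad epochs
# are dropped first, then the data are written in one or more parts of at
# most split_size bytes each.
epochs.save('sample-epo.fif', split_size='2GB')
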
 
     def equalize_event_counts(self, event_ids, method='mintime', copy=True):
         """Equalize the number of trials in each condition
@@ -1524,10 +1511,15 @@ class Epochs(_BaseEpochs):
             a str (single event) or a list of str. In the case where one of
             the entries is a list of str, event_ids in that list will be
             grouped together before equalizing trial counts across conditions.
+            If partial matching is used (i.e., '/' appears in `event_ids`),
+            each entry is expanded to the event_ids it matches, and
+            processing proceeds as if those matched event_ids had been
+            supplied directly. The event_ids must identify nonoverlapping
+            subsets of the epochs.
         method : str
             If 'truncate', events will be truncated from the end of each event
-            list. If 'mintime', timing differences between each event list will
-            be minimized.
+            list. If 'mintime', timing differences between each event list
+            will be minimized.
         copy : bool
             If True, a copy of epochs will be returned. Otherwise, the
             function will operate in-place.
@@ -1540,7 +1532,7 @@ class Epochs(_BaseEpochs):
             Indices from the original events list that were dropped.
 
         Notes
-        ----
+        -----
         For example (if epochs.event_id was {'Left': 1, 'Right': 2,
         'Nonspatial': 3}):
 
@@ -1548,6 +1540,11 @@ class Epochs(_BaseEpochs):
 
         would equalize the number of trials in the 'Nonspatial' condition with
         the total number of trials in the 'Left' and 'Right' conditions.
+
+        If multiple indices are provided (e.g., 'Left' and 'Right' in the
+        example above), there is no guarantee that each condition contributes
+        evenly after equalization; e.g., one could end up with 70 'Nonspatial'
+        trials, 69 'Left' and 1 'Right'.
         """
         if copy is True:
             epochs = self.copy()
@@ -1559,6 +1556,41 @@ class Epochs(_BaseEpochs):
             epochs.drop_bad_epochs()
         # figure out how to equalize
         eq_inds = list()
+
+        # deal with hierarchical tags
+        ids = epochs.event_id
+        tagging = False
+        if "/" in "".join(ids):
+            # make string inputs a list of length 1
+            event_ids = [[x] if isinstance(x, string_types) else x
+                         for x in event_ids]
+            for ids_ in event_ids:  # check if tagging is attempted
+                if any([id_ not in ids for id_ in ids_]):
+                    tagging = True
+            # 1. treat everything that's not in event_id as a tag
+            # 2a. for tags, find all the event_ids matched by the tags
+            # 2b. for non-tag ids, just pass them directly
+            # 3. do this for every input
+            event_ids = [[k for k in ids if all((tag in k.split("/")
+                         for tag in id_))]  # find ids matching all tags
+                         if all(id__ not in ids for id__ in id_)
+                         else id_  # straight pass for non-tag inputs
+                         for id_ in event_ids]
+            for id_ in event_ids:
+                if len(set([sub_id in ids for sub_id in id_])) != 1:
+                    err = ("Don't mix hierarchical and regular event_ids"
+                           " like in \'%s\'." % ", ".join(id_))
+                    raise ValueError(err)
+
+            # raise for non-orthogonal tags
+            if tagging is True:
+                events_ = [set(epochs[x].events[:, 0]) for x in event_ids]
+                doubles = events_[0].intersection(events_[1])
+                if len(doubles):
+                    raise ValueError("The two sets of epochs are "
+                                     "overlapping. Provide an "
+                                     "orthogonal selection.")
+
         for eq in event_ids:
             eq = np.atleast_1d(eq)
             # eq is now a list of types
@@ -1576,7 +1608,213 @@ class Epochs(_BaseEpochs):
         return epochs, indices
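
# A sketch of tag-based equalization (illustration only), assuming
# event_id={'audio/left': 1, 'audio/right': 2, 'visual/left': 3,
# 'visual/right': 4}: 'audio' and 'visual' are first expanded to the
# event_ids they match, then trials are dropped until counts are equal.
epochs_eq, dropped = epochs.equalize_event_counts(['audio', 'visual'],
                                                  method='mintime',
                                                  copy=True)
assert len(epochs_eq['audio']) == len(epochs_eq['visual'])
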
 
 
-class EpochsArray(Epochs):
+class Epochs(_BaseEpochs):
+    """Epochs extracted from a Raw instance
+
+    Parameters
+    ----------
+    raw : Raw object
+        An instance of Raw.
+    events : array of int, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used, and a dict
+        is created with string integer names corresponding to the event id
+        integers.
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) the entire time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels are used).
+    name : string
+        Comment that describes the Epochs data created.
+    preload : boolean
+        Load all epochs from disk when creating the object
+        or wait before accessing each epoch (more memory
+        efficient but can be slower).
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False no projections will be applied, which is the
+        recommended value if SSPs are not used for cleaning the data.
+    decim : int
+        Factor by which to downsample the data from the raw file upon import.
+        Warning: This simply selects every nth sample, data is not filtered
+        here. If data is not properly filtered, aliasing artifacts may occur.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    detrend : int | None
+        If 0 or 1, the data channels (MEG and EEG) will be detrended when
+        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
+        is no detrending. Note that detrending is performed before baseline
+        correction. If no DC offset is preferred (zeroth order detrending),
+        either turn off baseline correction, as this may introduce a DC
+        shift, or set baseline correction to use the entire time interval
+        (will yield equivalent results but be slower).
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    on_missing : str
+        What to do if one or several event ids are not found in the recording.
+        Valid keys are 'error' | 'warning' | 'ignore'.
+        Default is 'error'. If on_missing is 'warning' it will proceed but
+        warn, if 'ignore' it will proceed silently. Note that if none of the
+        event ids are found in the data, an error will be raised
+        irrespective of this parameter.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    Attributes
+    ----------
+    info : dict
+        Measurement info.
+    event_id : dict
+        Names of conditions corresponding to event_ids.
+    ch_names : list of string
+        List of channel names.
+    selection : array
+        List of indices of selected events (not dropped or ignored etc.). For
+        example, if the original event array had 4 events and the second event
+        has been dropped, this attribute would be np.array([0, 2, 3]).
+    preload : bool
+        Indicates whether epochs are in memory.
+    drop_log : list of lists
+        A list of the same length as the event array used to initialize the
+        Epochs object. If the i-th original event is still part of the
+        selection, drop_log[i] will be an empty list; otherwise it will be
+        a list of the reasons the event is no longer in the selection, e.g.:
+
+        'IGNORED' if it isn't part of the current subset defined by the user;
+        'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
+        names of channels that exceeded the amplitude threshold;
+        'EQUALIZED_COUNTS' (see equalize_event_counts);
+        or 'USER' for user-defined reasons (see drop_epochs).
+    verbose : bool, str, int, or None
+        See above.
+
+    Notes
+    -----
+    When accessing data, Epochs are detrended, baseline-corrected, and
+    decimated, then projectors are (optionally) applied.
+
+    For indexing and slicing:
+
+    epochs[idx] : Epochs
+        Return Epochs object with a subset of epochs (supports single
+        index and python-style slicing)
+
+    For subset selection using categorial labels:
+
+    epochs['name'] : Epochs
+        Return Epochs object with a subset of epochs corresponding to an
+        experimental condition as specified by 'name'.
+
+        If conditions are tagged by names separated by '/' (e.g. 'audio/left',
+        'audio/right'), and 'name' is not in itself an event key, this selects
+        every event whose condition contains the 'name' tag (e.g., 'left'
+        matches 'audio/left' and 'visual/left'; but not 'audio_left'). Note
+        that tags like 'auditory/left' and 'left/auditory' will be treated the
+        same way when accessed using tags.
+
+    epochs[['name_1', 'name_2', ... ]] : Epochs
+        Return Epochs object with a subset of epochs corresponding to multiple
+        experimental conditions as specified by 'name_1', 'name_2', ... .
+
+        If conditions are separated by '/', selects every item containing every
+        list tag (e.g. ['audio', 'left'] selects 'audio/left' and
+        'audio/center/left', but not 'audio/right').
+
+    See Also
+    --------
+    mne.epochs.combine_event_ids
+    mne.Epochs.equalize_event_counts
+    """
+    @verbose
+    def __init__(self, raw, events, event_id, tmin, tmax, baseline=(None, 0),
+                 picks=None, name='Unknown', preload=False, reject=None,
+                 flat=None, proj=True, decim=1, reject_tmin=None,
+                 reject_tmax=None, detrend=None, add_eeg_ref=True,
+                 on_missing='error', verbose=None):
+        if not isinstance(raw, _BaseRaw):
+            raise ValueError('The first argument to `Epochs` must be an '
+                             'instance of `mne.io.Raw`')
+        info = deepcopy(raw.info)
+
+        # proj is on when applied in Raw
+        proj = proj or raw.proj
+
+        # call _BaseEpochs constructor
+        super(Epochs, self).__init__(info, None, events, event_id, tmin, tmax,
+                                     baseline=baseline, raw=raw, picks=picks,
+                                     name=name, reject=reject, flat=flat,
+                                     decim=decim, reject_tmin=reject_tmin,
+                                     reject_tmax=reject_tmax, detrend=detrend,
+                                     add_eeg_ref=add_eeg_ref, proj=proj,
+                                     on_missing=on_missing,
+                                     preload_at_end=preload, verbose=verbose)
+
+    @verbose
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Load one epoch from disk"""
+        if self._raw is None:
+            # This should never happen, as raw=None only if preload=True
+            raise ValueError('An error has occurred, no valid raw file found.'
+                             ' Please report this to the mne-python '
+                             'developers.')
+        sfreq = self._raw.info['sfreq']
+        event_samp = self.events[idx, 0]
+        # Read a data segment
+        first_samp = self._raw.first_samp
+        start = int(round(event_samp + self.tmin * sfreq)) - first_samp
+        stop = start + len(self._raw_times)
+        return None if start < 0 else self._raw[self.picks, start:stop][0]
+
+
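
# An end-to-end sketch of the constructor defined above (illustration
# only; file names and event codes are hypothetical).
import mne

raw = mne.io.Raw('sample_raw.fif')
events = mne.read_events('sample-eve.fif')
epochs = mne.Epochs(raw, events, event_id=dict(auditory=1, visual=3),
                    tmin=-0.2, tmax=0.5, baseline=(None, 0),
                    reject=dict(eeg=40e-6), proj=True, preload=True)
data = epochs.get_data()  # (n_epochs, n_channels, n_times)
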
+class EpochsArray(_BaseEpochs):
     """Epochs object from numpy array
 
     Parameters
@@ -1586,7 +1824,7 @@ class EpochsArray(Epochs):
     info : instance of Info
         Info dictionary. Consider using ``create_info`` to populate
         this structure.
-    events : array, shape (n_events, 3)
+    events : array of int, shape (n_events, 3)
         The events typically returned by the read_events function.
         If some events don't match the events of interest as specified
         by event_id, they will be marked as 'IGNORED' in the drop log.
@@ -1594,17 +1832,16 @@ class EpochsArray(Epochs):
         Start time before event.
     event_id : int | list of int | dict | None
         The id of the event to consider. If dict,
-        the keys can later be used to acces associated events. Example:
+        the keys can later be used to access associated events. Example:
         dict(auditory=1, visual=3). If int, a dict will be created with
         the id as string. If a list, all events with the IDs specified
         in the list are used. If None, all events will be used, and a dict
         is created with string integer names corresponding to the event id
         integers.
-    reject : dict
-        Epoch rejection parameters based on peak to peak amplitude.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-        If reject is None then no rejection is done.
-        Values are float. Example::
+        If reject is None then no rejection is done. Example::
 
             reject = dict(grad=4000e-13, # T / m (gradiometers)
                           mag=4e-12, # T (magnetometers)
@@ -1612,9 +1849,10 @@ class EpochsArray(Epochs):
                           eog=250e-6 # uV (EOG channels)
                           )
 
-    flat : dict
-        Epoch rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
         If flat is None then no rejection is done.
     reject_tmin : scalar | None
         Start of the time window used to reject epochs (with the default None,
@@ -1622,78 +1860,54 @@ class EpochsArray(Epochs):
     reject_tmax : scalar | None
         End of the time window used to reject epochs (with the default None,
         the window will end with tmax).
+    baseline : None or tuple of length 2 (default: None)
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) the entire time
+        interval is used.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         Defaults to raw.verbose.
+
+    See Also
+    --------
+    io.RawArray, EvokedArray, create_info
     """
 
     @verbose
     def __init__(self, data, info, events, tmin=0, event_id=None,
                  reject=None, flat=None, reject_tmin=None,
-                 reject_tmax=None, verbose=None):
-
+                 reject_tmax=None, baseline=None, verbose=None):
         dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
         data = np.asanyarray(data, dtype=dtype)
-
         if data.ndim != 3:
             raise ValueError('Data must be a 3D array of shape (n_epochs, '
                              'n_channels, n_samples)')
 
-        if len(info['ch_names']) != np.shape(data)[1]:
+        if len(info['ch_names']) != data.shape[1]:
             raise ValueError('Info and data must have same number of '
                              'channels.')
-
-        self.info = info
-        self._data = data
+        if data.shape[0] != len(events):
+            raise ValueError('The number of epochs and the number of events '
+                             'must match')
+        tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
         if event_id is None:  # convert to int to make typing-checks happy
             event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
-        self.event_id = event_id
-        self.events = events
-
-        for key, val in self.event_id.items():
-            if val not in events[:, 2]:
-                msg = ('No matching events found for %s '
-                       '(event id %i)' % (key, val))
-                raise ValueError(msg)
-
-        self.proj = None
-        self.baseline = None
-        self.preload = True
-        self.reject = None
-        self.decim = 1
-        self._decim_idx = slice(0, data.shape[-1], self.decim)
-        self.raw = None
-        self.drop_log = [[] for _ in range(len(events))]
-        self._bad_dropped = True
-
-        self.selection = np.arange(len(events))
-        self.picks = None
-        self.times = (np.arange(data.shape[-1], dtype=np.float) /
-                      info['sfreq'] + tmin)
-        self.tmin = self.times[0]
-        self.tmax = self.times[-1]
-        self.verbose = verbose
-        self.name = 'Unknown'
-        self._projector = None
-        self.reject = reject
-        self.flat = flat
-        self.reject_tmin = reject_tmin
-        self.reject_tmax = reject_tmax
-        self._reject_setup()
-        drop_inds = list()
-        if self.reject is not None or self.flat is not None:
-            for i_epoch, epoch in enumerate(self):
-                is_good, chan = self._is_good_epoch(epoch,
-                                                    verbose=self.verbose)
-                if not is_good:
-                    drop_inds.append(i_epoch)
-                    self.drop_log[i_epoch].extend(chan)
-        if drop_inds:
-            select = np.ones(len(events), dtype=np.bool)
-            select[drop_inds] = False
-            self.events = self.events[select]
-            self._data = self._data[select]
-            self.selection[select]
+        super(EpochsArray, self).__init__(info, data, events, event_id, tmin,
+                                          tmax, baseline, reject=reject,
+                                          flat=flat, reject_tmin=reject_tmin,
+                                          reject_tmax=reject_tmax, decim=1)
+        if len(events) != in1d(self.events[:, 2],
+                               list(self.event_id.values())).sum():
+            raise ValueError('The events must only contain event numbers from '
+                             'event_id')
+        for ii, e in enumerate(self._data):
+            # This is safe without assignment b/c there is no decim
+            self._detrend_offset_decim(e)
+        self.drop_bad_epochs()
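
# A sketch (illustration only; channel names and event codes are made up):
# building epochs directly from a NumPy array.
import numpy as np
import mne

sfreq = 100.
info = mne.create_info(['EEG 001', 'EEG 002'], sfreq, ch_types='eeg')
data = np.random.RandomState(0).randn(5, 2, int(sfreq))  # 5 epochs of 1 s
events = np.c_[np.arange(5) * 100, np.zeros(5, int), np.ones(5, int)]
epochs = mne.EpochsArray(data, info, events, tmin=0., event_id={'fake': 1})
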
 
 
 def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
@@ -1746,7 +1960,8 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
     # replace the event numbers in the events list
     epochs.events[inds, 2] = new_event_num
     # delete old entries
-    [epochs.event_id.pop(key) for key in old_event_ids]
+    for key in old_event_ids:
+        epochs.event_id.pop(key)
     # add the new entry
     epochs.event_id.update(new_event_id)
     return epochs
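
# A sketch (illustration only), assuming `epochs` has
# event_id={'auditory/left': 1, 'auditory/right': 2}: both conditions are
# merged into a single new id.
from mne.epochs import combine_event_ids
epochs = combine_event_ids(epochs, ['auditory/left', 'auditory/right'],
                           {'auditory': 12})
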
@@ -1763,7 +1978,7 @@ def equalize_epoch_counts(epochs_list, method='mintime'):
     by minimizing the differences in the times of the events in the two sets of
     epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
     other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
-    [1, 2] in the first epochs and not [20, 21].
+    [1, 2] in the first epochs and not [120, 121].
 
     Note that this operates on the Epochs instances in-place.
 
@@ -1780,11 +1995,13 @@ def equalize_epoch_counts(epochs_list, method='mintime'):
         list. If 'mintime', timing differences between each event list will be
         minimized.
     """
-    if not all([isinstance(e, Epochs) for e in epochs_list]):
+    if not all(isinstance(e, Epochs) for e in epochs_list):
         raise ValueError('All inputs must be Epochs instances')
 
     # make sure bad epochs are dropped
-    [e.drop_bad_epochs() if not e._bad_dropped else None for e in epochs_list]
+    for e in epochs_list:
+        if not e._bad_dropped:
+            e.drop_bad_epochs()
     event_times = [e.events[:, 0] for e in epochs_list]
     indices = _get_drop_indices(event_times, method)
     for e, inds in zip(epochs_list, indices):
@@ -1795,7 +2012,7 @@ def _get_drop_indices(event_times, method):
     """Helper to get indices to drop from multiple event timing lists"""
     small_idx = np.argmin([e.shape[0] for e in event_times])
     small_e_times = event_times[small_idx]
-    if not method in ['mintime', 'truncate']:
+    if method not in ['mintime', 'truncate']:
         raise ValueError('method must be either mintime or truncate, not '
                          '%s' % method)
     indices = list()
@@ -1810,29 +2027,45 @@ def _get_drop_indices(event_times, method):
     return indices
 
 
+def _fix_fill(fill):
+    """Helper to fix bug on old scipy"""
+    if LooseVersion(scipy.__version__) < LooseVersion('0.12'):
+        fill = fill[:, np.newaxis]
+    return fill
+
+
 def _minimize_time_diff(t_shorter, t_longer):
     """Find a boolean mask to minimize timing differences"""
+    from scipy.interpolate import interp1d
     keep = np.ones((len(t_longer)), dtype=bool)
     scores = np.ones((len(t_longer)))
-    for iter in range(len(t_longer) - len(t_shorter)):
+    x1 = np.arange(len(t_shorter))
+    # The first set of keep masks to test
+    kwargs = dict(copy=False, bounds_error=False)
+    # this is a speed tweak, only exists for certain versions of scipy
+    if 'assume_sorted' in inspect.getargspec(interp1d.__init__).args:
+        kwargs['assume_sorted'] = True
+    shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
+                              **kwargs)
+    for ii in range(len(t_longer) - len(t_shorter)):
         scores.fill(np.inf)
+        # set up the keep masks to test, eliminating any rows that are already
+        # gone
+        keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
+        keep_mask[:, ~keep] = False
         # Check every possible removal to see if it minimizes
-        for idx in np.where(keep)[0]:
-            keep[idx] = False
-            scores[idx] = _area_between_times(t_shorter, t_longer[keep])
-            keep[idx] = True
+        x2 = np.arange(len(t_longer) - ii - 1)
+        t_keeps = np.array([t_longer[km] for km in keep_mask])
+        longer_interp = interp1d(x2, t_keeps, axis=1,
+                                 fill_value=_fix_fill(t_keeps[:, -1]),
+                                 **kwargs)
+        d1 = longer_interp(x1) - t_shorter
+        d2 = shorter_interp(x2) - t_keeps
+        # np.abs(x, x) takes the absolute value in-place before summing
+        scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
         keep[np.argmin(scores)] = False
     return keep
 
 
-def _area_between_times(t1, t2):
-    """Quantify the difference between two timing sets"""
-    x1 = list(range(len(t1)))
-    x2 = list(range(len(t2)))
-    xs = np.concatenate((x1, x2))
-    return np.sum(np.abs(np.interp(xs, x1, t1) - np.interp(xs, x2, t2)))
-
-
 @verbose
 def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
              ignore_chs=[], verbose=None):
@@ -1847,7 +2080,7 @@ def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
                         for c in ch_names], dtype=bool)] = False
     for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
         if refl is not None:
-            for key, thresh in six.iteritems(refl):
+            for key, thresh in iteritems(refl):
                 idx = channel_type_idx[key]
                 name = key.upper()
                 if len(idx) > 0:
@@ -1878,7 +2111,115 @@ def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
 
 
 @verbose
-def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
+def _read_one_epoch_file(f, tree, fname, preload):
+    """Helper to read a single FIF file"""
+
+    with f as fid:
+        #   Read the measurement info
+        info, meas = read_meas_info(fid, tree)
+        info['filename'] = fname
+
+        events, mappings = _read_events_fif(fid, tree)
+
+        #   Locate the data of interest
+        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+        if len(processed) == 0:
+            raise ValueError('Could not find processed data')
+
+        epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
+        if len(epochs_node) == 0:
+            raise ValueError('Could not find epochs data')
+
+        my_epochs = epochs_node[0]
+
+        # Now find the data in the block
+        name = None
+        data = None
+        data_tag = None
+        bmin, bmax = None, None
+        baseline = None
+        selection = None
+        drop_log = None
+        for k in range(my_epochs['nent']):
+            kind = my_epochs['directory'][k].kind
+            pos = my_epochs['directory'][k].pos
+            if kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, pos)
+                first = int(tag.data)
+            elif kind == FIFF.FIFF_LAST_SAMPLE:
+                tag = read_tag(fid, pos)
+                last = int(tag.data)
+            elif kind == FIFF.FIFF_COMMENT:
+                tag = read_tag(fid, pos)
+                name = tag.data
+            elif kind == FIFF.FIFF_EPOCH:
+                # delay reading until later
+                fid.seek(pos, 0)
+                data_tag = read_tag_info(fid)
+                data_tag.pos = pos
+            elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
+                tag = read_tag(fid, pos)
+                bmin = float(tag.data)
+            elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
+                tag = read_tag(fid, pos)
+                bmax = float(tag.data)
+            elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
+                tag = read_tag(fid, pos)
+                selection = np.array(tag.data)
+            elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
+                tag = read_tag(fid, pos)
+                drop_log = json.loads(tag.data)
+
+        if bmin is not None or bmax is not None:
+            baseline = (bmin, bmax)
+
+        n_samp = last - first + 1
+        logger.info('    Found the data of interest:')
+        logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                    % (1000 * first / info['sfreq'],
+                       1000 * last / info['sfreq'], name))
+        if info['comps'] is not None:
+            logger.info('        %d CTF compensation matrices available'
+                        % len(info['comps']))
+
+        # Inspect the data
+        if data_tag is None:
+            raise ValueError('Epochs data not found')
+        epoch_shape = (len(info['ch_names']), n_samp)
+        expected = len(events) * np.prod(epoch_shape)
+        if data_tag.size // 4 - 4 != expected:  # 32-bit floats stored
+            raise ValueError('Incorrect number of samples (%d instead of %d)'
+                             % (data_tag.size // 4 - 4, expected))
+
+        # Calibration factors
+        cals = np.array([[info['chs'][k]['cal'] *
+                          info['chs'][k].get('scale', 1.0)]
+                         for k in range(info['nchan'])], np.float64)
+
+        # Read the data
+        if preload:
+            data = read_tag(fid, data_tag.pos).data.astype(np.float64)
+            data *= cals[np.newaxis, :, :]
+
+        # Put it all together
+        tmin = first / info['sfreq']
+        tmax = last / info['sfreq']
+        event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
+                    if mappings is None else mappings)
+        # In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
+        # (version < 0.8):
+        if selection is None:
+            selection = np.arange(len(events))
+        if drop_log is None:
+            drop_log = [[] for _ in range(len(events))]
+
+    return (info, data, data_tag, events, event_id, tmin, tmax, baseline, name,
+            selection, drop_log, epoch_shape, cals)
+
+
+@verbose
+def read_epochs(fname, proj=True, add_eeg_ref=True, preload=True,
+                verbose=None):
     """Read epochs from a fif file
 
     Parameters
@@ -1899,6 +2240,9 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     add_eeg_ref : bool
         If True, an EEG average reference will be added (unless one
         already exists).
+    preload : bool
+        If True, read all epochs from disk immediately. If False, epochs will
+        be read on demand.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         Defaults to raw.verbose.
@@ -1908,130 +2252,144 @@ def read_epochs(fname, proj=True, add_eeg_ref=True, verbose=None):
     epochs : instance of Epochs
         The epochs
     """
-    check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
-
-    epochs = Epochs(None, None, None, None, None)
-
-    logger.info('Reading %s ...' % fname)
-    fid, tree, _ = fiff_open(fname)
-
-    #   Read the measurement info
-    info, meas = read_meas_info(fid, tree)
-    info['filename'] = fname
-
-    events, mappings = _read_events_fif(fid, tree)
-
-    #   Locate the data of interest
-    processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
-    if len(processed) == 0:
-        fid.close()
-        raise ValueError('Could not find processed data')
-
-    epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
-    if len(epochs_node) == 0:
-        fid.close()
-        raise ValueError('Could not find epochs data')
-
-    my_epochs = epochs_node[0]
-
-    # Now find the data in the block
-    comment = None
-    data = None
-    bmin, bmax = None, None
-    baseline = None
-    selection = None
-    drop_log = []
-    for k in range(my_epochs['nent']):
-        kind = my_epochs['directory'][k].kind
-        pos = my_epochs['directory'][k].pos
-        if kind == FIFF.FIFF_FIRST_SAMPLE:
-            tag = read_tag(fid, pos)
-            first = int(tag.data)
-        elif kind == FIFF.FIFF_LAST_SAMPLE:
-            tag = read_tag(fid, pos)
-            last = int(tag.data)
-        elif kind == FIFF.FIFF_COMMENT:
-            tag = read_tag(fid, pos)
-            comment = tag.data
-        elif kind == FIFF.FIFF_EPOCH:
-            tag = read_tag(fid, pos)
-            data = tag.data.astype(np.float)
-        elif kind == FIFF.FIFF_MNE_BASELINE_MIN:
-            tag = read_tag(fid, pos)
-            bmin = float(tag.data)
-        elif kind == FIFF.FIFF_MNE_BASELINE_MAX:
-            tag = read_tag(fid, pos)
-            bmax = float(tag.data)
-        elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
-            tag = read_tag(fid, pos)
-            selection = np.array(tag.data)
-        elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
-            tag = read_tag(fid, pos)
-            drop_log = json.loads(tag.data)
-
-    if bmin is not None or bmax is not None:
-        baseline = (bmin, bmax)
-
-    nsamp = last - first + 1
-    logger.info('    Found the data of interest:')
-    logger.info('        t = %10.2f ... %10.2f ms (%s)'
-                % (1000 * first / info['sfreq'],
-                   1000 * last / info['sfreq'], comment))
-    if info['comps'] is not None:
-        logger.info('        %d CTF compensation matrices available'
-                    % len(info['comps']))
-
-    # Read the data
-    if data is None:
-        raise ValueError('Epochs data not found')
-
-    if data.shape[2] != nsamp:
-        fid.close()
-        raise ValueError('Incorrect number of samples (%d instead of %d)'
-                         % (data.shape[2], nsamp))
-
-    # Calibrate
-    cals = np.array([info['chs'][k]['cal'] * info['chs'][k].get('scale', 1.0)
-                     for k in range(info['nchan'])])
-    data *= cals[np.newaxis, :, np.newaxis]
-
-    times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
-    tmin = times[0]
-    tmax = times[-1]
-
-    # Put it all together
-    epochs.preload = True
-    epochs.raw = None
-    epochs.picks = np.arange(data.shape[1])
-    epochs._bad_dropped = True
-    epochs.events = events
-    epochs._data = data
-    epochs.info = info
-    epochs.tmin = tmin
-    epochs.tmax = tmax
-    epochs.name = comment
-    epochs.times = times
-    epochs._data = data
-    epochs.proj = proj
-    activate = False if epochs._check_delayed() else proj
-    epochs._projector, epochs.info = setup_proj(info, add_eeg_ref,
-                                                activate=activate)
+    return EpochsFIF(fname, proj, add_eeg_ref, preload, verbose)
 
-    epochs.baseline = baseline
-    epochs.event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
-                       if mappings is None else mappings)
-    epochs.verbose = verbose
 
-    # In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
-    # (version < 0.8):
-    if selection is None:
-        selection = range(len(epochs))
+class _RawContainer(object):
+    def __init__(self, fid, data_tag, event_samps, epoch_shape, cals):
+        self.fid = fid
+        self.data_tag = data_tag
+        self.event_samps = event_samps
+        self.epoch_shape = epoch_shape
+        self.cals = cals
+        self.proj = False
 
-    epochs.selection = selection
-    epochs.drop_log = drop_log
-    fid.close()
+    def __del__(self):
+        self.fid.close()
 
-    return epochs
+
+class EpochsFIF(_BaseEpochs):
+    """Epochs read from disk
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file, which should end with -epo.fif or -epo.fif.gz.
+    proj : bool | 'delayed'
+        Apply SSP projection vectors. If proj is 'delayed' and reject is not
+        None the single epochs will be projected before the rejection
+        decision, but used in unprojected state if they are kept.
+        This way deciding which projection vectors are good can be postponed
+        to the evoked stage without resulting in lower epoch counts and
+        without producing results different from early SSP application
+        given comparable parameters. Note that in this case baselining,
+        detrending and temporal decimation will be postponed.
+        If proj is False, no projections will be applied, which is the
+        recommended value if SSPs are not used for cleaning the data.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    preload : bool
+        If True, read all epochs from disk immediately. If False, epochs will
+        be read on demand.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+        Defaults to raw.verbose.
+
+    See Also
+    --------
+    mne.Epochs
+    mne.epochs.combine_event_ids
+    mne.Epochs.equalize_event_counts
+    """
+    @verbose
+    def __init__(self, fname, proj=True, add_eeg_ref=True, preload=True,
+                 verbose=None):
+        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
+
+        fnames = [fname]
+        ep_list = list()
+        raw = list()
+        for fname in fnames:
+            logger.info('Reading %s ...' % fname)
+            fid, tree, _ = fiff_open(fname)
+            next_fname = _get_next_fname(fid, fname, tree)
+            (info, data, data_tag, events, event_id, tmin, tmax, baseline,
+             name, selection, drop_log, epoch_shape, cals) = \
+                _read_one_epoch_file(fid, tree, fname, preload)
+            # here we ignore missing events, since users should already be
+            # aware of missing events if they have saved data that way
+            epoch = _BaseEpochs(
+                info, data, events, event_id, tmin, tmax, baseline,
+                on_missing='ignore', selection=selection, drop_log=drop_log,
+                add_eeg_ref=False, proj=False, verbose=False)
+            ep_list.append(epoch)
+            if not preload:
+                # store everything we need to index back to the original data
+                raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
+                                         events[:, 0].copy(), epoch_shape,
+                                         cals))
+
+            if next_fname is not None:
+                fnames.append(next_fname)
+
+        (info, data, events, event_id, tmin, tmax, baseline, selection,
+         drop_log, _) = _concatenate_epochs(ep_list, with_data=preload)
+        # we need this uniqueness for non-preloaded data to work properly
+        if len(np.unique(events[:, 0])) != len(events):
+            raise RuntimeError('Event time samples were not unique')
+
+        # correct the drop log
+        assert len(drop_log) % len(fnames) == 0
+        step = len(drop_log) // len(fnames)
+        offsets = np.arange(step, len(drop_log) + 1, step)
+        for i1, i2 in zip(offsets[:-1], offsets[1:]):
+            other_log = drop_log[i1:i2]
+            for k, (a, b) in enumerate(zip(drop_log, other_log)):
+                if a == ['IGNORED'] and b != ['IGNORED']:
+                    drop_log[k] = b
+        drop_log = drop_log[:step]
+
+        # call _BaseEpochs constructor
+        super(EpochsFIF, self).__init__(
+            info, data, events, event_id, tmin, tmax, baseline, raw=raw,
+            name=name, proj=proj, add_eeg_ref=add_eeg_ref,
+            preload_at_end=False, on_missing='ignore', selection=selection,
+            drop_log=drop_log, verbose=verbose)
+        # use the private property instead of drop_bad_epochs so that epochs
+        # are not all read from disk for preload=False
+        self._bad_dropped = True
+
+    @verbose
+    def _get_epoch_from_raw(self, idx, verbose=None):
+        """Load one epoch from disk"""
+        # Find the right file and offset to use
+        event_samp = self.events[idx, 0]
+        for raw in self._raw:
+            idx = np.where(raw.event_samps == event_samp)[0]
+            if len(idx) == 1:
+                idx = idx[0]
+                size = np.prod(raw.epoch_shape) * 4
+                offset = idx * size
+                break
+        else:
+            # no stored epoch matches this event sample; should not happen
+            # for files written by mne-python
+            raise RuntimeError('Correct epoch could not be found, please '
+                               'contact mne-python developers')
+        # the following is equivalent to this, but faster:
+        #
+        # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
+        # >>> data *= raw.cals[np.newaxis, :, :]
+        # >>> data = data[idx]
+        #
+        # Eventually this could be refactored in io/tag.py if other functions
+        # could make use of it
+
+        raw.fid.seek(raw.data_tag.pos + offset + 16, 0)  # 16 = Tag header
+        data = np.fromstring(raw.fid.read(size), '>f4').astype(np.float64)
+        data.shape = raw.epoch_shape
+        data *= raw.cals
+        return data
 
 
 def bootstrap(epochs, random_state=None):
@@ -2101,7 +2459,7 @@ def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
     epochs_list : list of Epochs
         Epochs object to concatenate.
     name : str
-        Comment that describes the Evoked data created.
+        Comment that describes the Epochs data created.
     add_eeg_ref : bool
         If True, an EEG average reference will be added (unless there is no
         EEG in the data).
@@ -2114,7 +2472,7 @@ def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
     epochs : Epochs
         Concatenated epochs.
     """
-    if not np.all([e.preload for e in epochs_list]):
+    if not all(e.preload for e in epochs_list):
         raise ValueError('All epochs must be preloaded.')
 
     info = _merge_info([epochs.info for epochs in epochs_list])
@@ -2132,9 +2490,9 @@ def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
         raise RuntimeError(err)
 
     events = epochs_list[0].events.copy()
-    all_same = np.all([events == epochs.events for epochs in epochs_list[1:]],
-                      axis=0)
-    if not np.all(all_same):
+    all_same = all(np.array_equal(events, epochs.events)
+                   for epochs in epochs_list[1:])
+    if not all_same:
         raise ValueError('Events must be the same.')
 
     proj = any(e.proj for e in epochs_list) or add_eeg_ref
@@ -2155,7 +2513,90 @@ def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
     epochs.preload = True
     epochs._bad_dropped = True
     epochs._data = data
-    epochs.proj = proj
     epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
                                                 activate=proj)
     return epochs
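A sketch of the intended use of ``add_channels_epochs`` (assuming two
preloaded Epochs objects with identical events and times but disjoint
channel sets; the variable names are hypothetical)::

    from mne.epochs import add_channels_epochs

    # epochs_meg and epochs_eeg must both be created with preload=True
    combined = add_channels_epochs([epochs_meg, epochs_eeg], name='meg+eeg')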
+
+
+def _compare_epochs_infos(info1, info2, ind):
+    """Compare infos"""
+    info1._check_consistency()
+    info2._check_consistency()
+    if info1['nchan'] != info2['nchan']:
+        raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
+    if info1['bads'] != info2['bads']:
+        raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
+    if info1['sfreq'] != info2['sfreq']:
+        raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
+    if set(info1['ch_names']) != set(info2['ch_names']):
+        raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
+    if len(info2['projs']) != len(info1['projs']):
+        raise ValueError('SSP projectors in epochs files must be the same')
+    if any(not _proj_equal(p1, p2) for p1, p2 in
+           zip(info2['projs'], info1['projs'])):
+        raise ValueError('SSP projectors in epochs files must be the same')
+
+
+def _concatenate_epochs(epochs_list, with_data=True):
+    """Auxiliary function for concatenating epochs."""
+    out = epochs_list[0]
+    data = [out.get_data()] if with_data else None
+    events = [out.events]
+    baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
+    info = deepcopy(out.info)
+    verbose = out.verbose
+    drop_log = deepcopy(out.drop_log)
+    event_id = deepcopy(out.event_id)
+    selection = out.selection
+    for ii, epochs in enumerate(epochs_list[1:]):
+        _compare_epochs_infos(epochs.info, info, ii)
+        if not np.array_equal(epochs.times, epochs_list[0].times):
+            raise ValueError('Epochs must have same times')
+
+        if epochs.baseline != baseline:
+            raise ValueError('Baseline must be same for all epochs')
+
+        if with_data:
+            data.append(epochs.get_data())
+        events.append(epochs.events)
+        selection = np.concatenate((selection, epochs.selection))
+        drop_log.extend(epochs.drop_log)
+        event_id.update(epochs.event_id)
+    events = np.concatenate(events, axis=0)
+    if with_data:
+        data = np.concatenate(data, axis=0)
+    return (info, data, events, event_id, tmin, tmax, baseline, selection,
+            drop_log, verbose)
+
+
+def _finish_concat(info, data, events, event_id, tmin, tmax, baseline,
+                   selection, drop_log, verbose):
+    """Helper to finish concatenation for epochs not read from disk"""
+    events[:, 0] = np.arange(len(events))  # arbitrary after concat
+    selection = np.where([len(d) == 0 for d in drop_log])[0]
+    out = _BaseEpochs(info, data, events, event_id, tmin, tmax,
+                      baseline=baseline, add_eeg_ref=False,
+                      selection=selection, drop_log=drop_log,
+                      proj=False, on_missing='ignore', verbose=verbose)
+    out.drop_bad_epochs()
+    return out
+
+
+def concatenate_epochs(epochs_list):
+    """Concatenate a list of epochs into one epochs object
+
+    Parameters
+    ----------
+    epochs_list : list
+        list of Epochs instances to concatenate (in order).
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The result of the concatenation (first Epochs instance passed in).
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    return _finish_concat(*_concatenate_epochs(epochs_list))
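A short usage sketch, assuming the top-level export ``mne.concatenate_epochs``
(file names illustrative; infos, times, and baselines of the inputs must
match, as enforced by _concatenate_epochs)::

    import mne

    epochs1 = mne.read_epochs('run1-epo.fif')
    epochs2 = mne.read_epochs('run2-epo.fif')

    # events, selections, and drop logs are merged in order
    combined = mne.concatenate_epochs([epochs1, epochs2])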
diff --git a/mne/event.py b/mne/event.py
index c7d6d48..fd15e63 100644
--- a/mne/event.py
+++ b/mne/event.py
@@ -3,6 +3,8 @@
 
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Clement Moutard <clement.moutard at polytechnique.org>
 #
 # License: BSD (3-clause)
 
@@ -18,11 +20,13 @@ from .io.write import write_int, start_block, start_file, end_block, end_file
 from .io.pick import pick_channels
 
 
-def pick_events(events, include=None, exclude=None):
+def pick_events(events, include=None, exclude=None, step=False):
     """Select some events
 
     Parameters
     ----------
+    events : ndarray
+        Array as returned by mne.find_events.
     include : int | list | None
         An event id to include or a list of them.
         If None, all events are included.
@@ -30,6 +34,11 @@ def pick_events(events, include=None, exclude=None):
         An event id to exclude or a list of them.
         If None, no event is excluded. If include is not None,
         the exclude parameter is ignored.
+    step : bool
+        If True (default is False), events have a step format, as returned
+        by find_events() with output='step'. In this case the last two
+        columns are considered in the inclusion/exclusion criteria.
 
     Returns
     -------
@@ -42,6 +51,8 @@ def pick_events(events, include=None, exclude=None):
         mask = np.zeros(len(events), dtype=np.bool)
         for e in include:
             mask = np.logical_or(mask, events[:, 2] == e)
+            if step:
+                mask = np.logical_or(mask, events[:, 1] == e)
         events = events[mask]
     elif exclude is not None:
         if not isinstance(exclude, list):
@@ -49,6 +60,8 @@ def pick_events(events, include=None, exclude=None):
         mask = np.ones(len(events), dtype=np.bool)
         for e in exclude:
             mask = np.logical_and(mask, events[:, 2] != e)
+            if step:
+                mask = np.logical_and(mask, events[:, 1] != e)
         events = events[mask]
     else:
         events = np.copy(events)
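A sketch of the new ``step`` handling, using an illustrative step-format
array of the kind returned by find_events(..., output='step')::

    import numpy as np
    from mne.event import pick_events

    events = np.array([[1,  0, 32],   # onset of 32
                       [3, 32, 33],   # step from 32 to 33
                       [5, 33,  0]])  # offset of 33

    # with step=True the previous-value column is also matched, so the
    # offset row of event 33 is kept as well:
    print(pick_events(events, include=33, step=True))
    # [[ 3 32 33]
    #  [ 5 33  0]]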
@@ -106,7 +119,7 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
 
     new_events = []
     lag = []
-    for event in events.copy().astype('f8'):
+    for event in events.copy().astype(int):
         if event[2] == reference_id:
             lower = event[0] + imin
             upper = event[0] + imax
@@ -119,13 +132,14 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax,
             elif fill_na is not None:
                 event[2] = fill_na
                 new_events += [event]
-                lag += [fill_na]
+                lag.append(np.nan)
 
     new_events = np.array(new_events)
 
-    lag = np.abs(lag, dtype='f8')
+    with np.errstate(invalid='ignore'):  # casting nans
+        lag = np.abs(lag, dtype='f8')
     if lag.any():
-        lag[lag != fill_na] *= tsample
+        lag *= tsample
     else:
         lag = np.array([])
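A hedged usage sketch for define_target_events (event ids and the time
window are illustrative; ``raw`` stands for a previously loaded Raw object)::

    import mne

    events = mne.find_events(raw)
    # relabel stimuli (id 1) followed by a response (id 2) within
    # 0.1-0.6 s as id 42; unmatched stimuli get id 99 and lag NaN
    new_events, lags = mne.event.define_target_events(
        events, reference_id=1, target_id=2, sfreq=raw.info['sfreq'],
        tmin=0.1, tmax=0.6, new_id=42, fill_na=99)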
 
@@ -174,7 +188,7 @@ def _read_events_fif(fid, tree):
     return event_list, mappings
 
 
-def read_events(filename, include=None, exclude=None):
+def read_events(filename, include=None, exclude=None, mask=0):
     """Reads events from fif or text file
 
     Parameters
@@ -193,12 +207,19 @@ def read_events(filename, include=None, exclude=None):
         An event id to exclude or a list of them.
         If None, no event is excluded. If include is not None,
         the exclude parameter is ignored.
+    mask : int
+        The value of the digital mask to apply to the stim channel values.
+        The default value is 0.
 
     Returns
     -------
     events: array, shape (n_events, 3)
         The list of events
 
+    See Also
+    --------
+    find_events, write_events
+
     Notes
     -----
     This function will discard the offset line (i.e., first line with zero
@@ -240,6 +261,8 @@ def read_events(filename, include=None, exclude=None):
             event_list = event_list[1:]
 
     event_list = pick_events(event_list, include, exclude)
+    event_list = _mask_trigs(event_list, mask)
+
     return event_list
 
 
@@ -258,6 +281,10 @@ def write_events(filename, event_list):
 
     event_list : array, shape (n_events, 3)
         The list of events
+
+    See Also
+    --------
+    read_events
     """
     check_fname(filename, 'events', ('.eve', '-eve.fif', '-eve.fif.gz',
                                      '-eve.lst', '-eve.txt'))
@@ -274,7 +301,8 @@ def write_events(filename, event_list):
         end_file(fid)
     else:
         f = open(filename, 'w')
-        [f.write('%6d %6d %3d\n' % tuple(e)) for e in event_list]
+        for e in event_list:
+            f.write('%6d %6d %3d\n' % tuple(e))
         f.close()
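A round-trip sketch for the text writer above (the file name is
illustrative)::

    import mne

    mne.write_events('sample-eve.txt', events)  # one '%6d %6d %3d' row each
    events2 = mne.read_events('sample-eve.txt')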
 
 
@@ -306,7 +334,7 @@ def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0):
         idx = (diff <= abs(merge))
         if np.any(idx):
             where = np.where(idx)[0]
-            keep = (idx == False)
+            keep = np.logical_not(idx)
             if merge > 0:
                 # drop the earlier event
                 steps[where + 1, 1] = steps[where, 1]
@@ -331,10 +359,12 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
     ----------
     raw : Raw object
         The raw data.
-    pad_start, pad_stop : None | int
+    pad_start : None | int
         Values to assume outside of the stim channel (e.g., if pad_start=0 and
         the stim channel starts with value 5, an event of [0, 0, 5] will be
         inserted at the beginning). With None, no steps will be inserted.
+    pad_stop : None | int
+        Values to assume outside of the stim channel, see ``pad_start``.
     merge : int
         Merge steps occurring in neighboring samples. The integer value
         indicates over how many samples events should be merged, and the sign
@@ -361,7 +391,7 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
     """
 
     # pull stim channel from config if necessary
-    stim_channel = _get_stim_channel(stim_channel)
+    stim_channel = _get_stim_channel(stim_channel, raw.info)
 
     picks = pick_channels(raw.info['ch_names'], include=stim_channel)
     if len(picks) == 0:
@@ -379,7 +409,7 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0,
 
 @verbose
 def _find_events(data, first_samp, verbose=None, output='onset',
-                 consecutive='increasing', min_samples=0):
+                 consecutive='increasing', min_samples=0, mask=0):
     """Helper function for find events"""
     if min_samples > 0:
         merge = int(min_samples // 1)
@@ -395,6 +425,7 @@ def _find_events(data, first_samp, verbose=None, output='onset',
     data = data.astype(np.int)
 
     events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge)
+    events = _mask_trigs(events, mask)
 
     # Determine event onsets and offsets
     if consecutive == 'increasing':
@@ -446,7 +477,7 @@ def _find_events(data, first_samp, verbose=None, output='onset',
 @verbose
 def find_events(raw, stim_channel=None, verbose=None, output='onset',
                 consecutive='increasing', min_duration=0,
-                shortest_event=2):
+                shortest_event=2, mask=0):
     """Find events from raw file
 
     Parameters
@@ -457,8 +488,9 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
         Name of the stim channel or all the stim channels
         affected by the trigger. If None, the config variables
         'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
-        etc. are read. If these are not found, it will default to
-        'STI 014'.
+        etc. are read. If these are not found, it will fall back to
+        'STI 014', if present, and then to the first channel of type
+        'stim', if present.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     output : 'onset' | 'offset' | 'step'
@@ -476,6 +508,9 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     shortest_event : int
         Minimum number of samples an event must last (default is 2). If the
         duration is less than this an exception will be raised.
+    mask : int
+        The value of the digital mask to apply to the stim channel values.
+        The default value is 0.
 
     Returns
     -------
@@ -492,20 +527,20 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     Consider data with a stim channel that looks like: [0, 32, 32, 33, 32, 0]
 
     By default, find_events returns all samples at which the value of the
-    stim channel increases:
+    stim channel increases::
 
         >>> print(find_events(raw)) # doctest: +SKIP
         [[ 1  0 32]
          [ 3 32 33]]
 
     If consecutive is False, find_events only returns the samples at which
-    the stim channel changes from zero to a non-zero value:
+    the stim channel changes from zero to a non-zero value::
 
         >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP
         [[ 1  0 32]]
 
     If consecutive is True, find_events returns samples at which the
-    event changes, regardless of whether it first returns to zero:
+    event changes, regardless of whether it first returns to zero::
 
         >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP
         [[ 1  0 32]
@@ -513,7 +548,7 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
          [ 4 33 32]]
 
     If output is 'offset', find_events returns the last sample of each event
-    instead of the first one:
+    instead of the first one::
 
         >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
         ...                   output='offset'))
@@ -522,7 +557,7 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
          [ 4  0 32]]
 
     If output is 'step', find_events returns the samples at which an event
-    starts or ends:
+    starts or ends::
 
         >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
         ...                   output='step'))
@@ -533,12 +568,20 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
 
     To ignore spurious events, it is also possible to specify a minimum
     event duration. Assuming our events channel has a sample rate of
-    1000 Hz:
+    1000 Hz::
 
         >>> print(find_events(raw, consecutive=True, # doctest: +SKIP
         ...                   min_duration=0.002))
         [[ 1  0 32]]
 
+    For the digital mask, the binary representation of the mask is taken
+    (e.g. 5 -> '0000101') and the trigger-value bits where the mask is one
+    are blocked (zeroed out), e.g.::
+
+              7 '0000111' <- trigger value
+             37 '0100101' <- mask
+         ----------------
+              2 '0000010'
 
     See Also
     --------
@@ -547,7 +590,7 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     min_samples = min_duration * raw.info['sfreq']
 
     # pull stim channel from config if necessary
-    stim_channel = _get_stim_channel(stim_channel)
+    stim_channel = _get_stim_channel(stim_channel, raw.info)
 
     pick = pick_channels(raw.info['ch_names'], include=stim_channel)
     if len(pick) == 0:
@@ -555,7 +598,8 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     data, _ = raw[pick, :]
 
     events = _find_events(data, raw.first_samp, verbose=verbose, output=output,
-                          consecutive=consecutive, min_samples=min_samples)
+                          consecutive=consecutive, min_samples=min_samples,
+                          mask=mask)
 
     # add safety check for spurious events (for ex. from neuromag syst.) by
     # checking the number of low sample events
@@ -570,6 +614,22 @@ def find_events(raw, stim_channel=None, verbose=None, output='onset',
     return events
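A sketch of the new ``mask`` argument (values illustrative; ``raw`` stands
for a loaded Raw object)::

    import mne

    # zero out trigger bits 0, 2, and 5 (mask 37 = 0b0100101)
    events = mne.find_events(raw, stim_channel='STI 014', mask=37)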
 
 
+def _mask_trigs(events, mask):
+    """Helper function for masking digital trigger values"""
+    if not isinstance(mask, int):
+        raise TypeError('You provided a(n) %s. Mask must be an int.'
+                        % type(mask))
+    n_events = len(events)
+    if n_events == 0:
+        return events.copy()
+
+    mask = np.bitwise_not(mask)
+    events[:, 1:] = np.bitwise_and(events[:, 1:], mask)
+    events = events[events[:, 1] != events[:, 2]]
+
+    return events
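A quick numeric check of the masking rule documented above, mirroring
_mask_trigs with plain numpy::

    import numpy as np

    trigger, mask = 7, 37        # 0b0000111 and 0b0100101
    masked = np.bitwise_and(trigger, np.bitwise_not(mask))
    print(masked)                # 2, i.e. 0b0000010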
+
+
 def merge_events(events, ids, new_id, replace_events=True):
     """Merge a set of events
 
diff --git a/mne/evoked.py b/mne/evoked.py
index 77a884d..fdd9c60 100644
--- a/mne/evoked.py
+++ b/mne/evoked.py
@@ -2,6 +2,7 @@
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Denis Engemann <denis.engemann at gmail.com>
 #          Andrew Dykstra <andrew.r.dykstra at gmail.com>
+#          Mads Jensen <mje.mads at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -10,14 +11,15 @@ import numpy as np
 import warnings
 
 from .baseline import rescale
-from .channels import ContainsMixin, PickDropChannelsMixin
-from .filter import resample, detrend
+from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                SetChannelsMixin, InterpolationMixin,
+                                equalize_channels)
+from .filter import resample, detrend, FilterMixin
 from .fixes import in1d
-from .utils import (_check_pandas_installed, check_fname, logger, verbose,
-                    deprecated, object_hash)
-from .viz import plot_evoked, plot_evoked_topomap, _mutable_defaults
-from .viz import plot_evoked_field
-from .viz import plot_evoked_image
+from .utils import check_fname, logger, verbose, object_hash, _time_mask
+from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
+                  plot_evoked_image, plot_evoked_topo)
+from .viz.evoked import _plot_evoked_white
 from .externals.six import string_types
 
 from .io.constants import FIFF
@@ -30,14 +32,17 @@ from .io.proj import ProjMixin
 from .io.write import (start_file, start_block, end_file, end_block,
                        write_int, write_string, write_float_matrix,
                        write_id)
+from .io.base import ToDataFrameMixin
 
-aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
-               'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
-aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
-              str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
+_aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
+                'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
+_aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
+               str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
 
 
-class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
+class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+             SetChannelsMixin, InterpolationMixin, FilterMixin,
+             ToDataFrameMixin):
     """Evoked data
 
     Parameters
@@ -89,173 +94,162 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
     """
     @verbose
     def __init__(self, fname, condition=None, baseline=None, proj=True,
-                 kind='average', verbose=None, setno=None):
+                 kind='average', verbose=None):
 
         if fname is None:
             raise ValueError('No evoked filename specified')
 
-        if condition is None and setno is not None:
-            condition = setno
-            msg = ("'setno' will be deprecated in 0.9. Use 'condition' "
-                   "instead.")
-            warnings.warn(msg, DeprecationWarning)
-
         self.verbose = verbose
         logger.info('Reading %s ...' % fname)
-        fid, tree, _ = fiff_open(fname)
-        if not isinstance(proj, bool):
-            raise ValueError(r"'proj' must be 'True' or 'False'")
-
-        #   Read the measurement info
-        info, meas = read_meas_info(fid, tree)
-        info['filename'] = fname
-
-        #   Locate the data of interest
-        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
-        if len(processed) == 0:
-            fid.close()
-            raise ValueError('Could not find processed data')
-
-        evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
-        if len(evoked_node) == 0:
-            fid.close()
-            raise ValueError('Could not find evoked data')
-
-        # find string-based entry
-        if isinstance(condition, string_types):
-            if not kind in aspect_dict.keys():
-                fid.close()
-                raise ValueError('kind must be "average" or '
-                                 '"standard_error"')
-
-            comments, aspect_kinds, t = _get_entries(fid, evoked_node)
-            goods = np.logical_and(in1d(comments, [condition]),
-                                   in1d(aspect_kinds, [aspect_dict[kind]]))
-            found_cond = np.where(goods)[0]
-            if len(found_cond) != 1:
-                fid.close()
-                raise ValueError('condition "%s" (%s) not found, out of found '
-                                 'datasets:\n  %s' % (condition, kind, t))
-            condition = found_cond[0]
-
-        if condition >= len(evoked_node) or condition < 0:
-            fid.close()
-            raise ValueError('Data set selector out of range')
-
-        my_evoked = evoked_node[condition]
-
-        # Identify the aspects
-        aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
-        if len(aspects) > 1:
-            logger.info('Multiple aspects found. Taking first one.')
-        my_aspect = aspects[0]
-
-        # Now find the data in the evoked block
-        nchan = 0
-        sfreq = -1
-        chs = []
-        comment = None
-        for k in range(my_evoked['nent']):
-            my_kind = my_evoked['directory'][k].kind
-            pos = my_evoked['directory'][k].pos
-            if my_kind == FIFF.FIFF_COMMENT:
-                tag = read_tag(fid, pos)
-                comment = tag.data
-            elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
-                tag = read_tag(fid, pos)
-                first = int(tag.data)
-            elif my_kind == FIFF.FIFF_LAST_SAMPLE:
-                tag = read_tag(fid, pos)
-                last = int(tag.data)
-            elif my_kind == FIFF.FIFF_NCHAN:
-                tag = read_tag(fid, pos)
-                nchan = int(tag.data)
-            elif my_kind == FIFF.FIFF_SFREQ:
-                tag = read_tag(fid, pos)
-                sfreq = float(tag.data)
-            elif my_kind == FIFF.FIFF_CH_INFO:
-                tag = read_tag(fid, pos)
-                chs.append(tag.data)
-
-        if comment is None:
-            comment = 'No comment'
-
-        #   Local channel information?
-        if nchan > 0:
-            if chs is None:
+        f, tree, _ = fiff_open(fname)
+        with f as fid:
+            if not isinstance(proj, bool):
+                raise ValueError(r"'proj' must be 'True' or 'False'")
+
+            #   Read the measurement info
+            info, meas = read_meas_info(fid, tree)
+            info['filename'] = fname
+
+            #   Locate the data of interest
+            processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
+            if len(processed) == 0:
+                raise ValueError('Could not find processed data')
+
+            evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
+            if len(evoked_node) == 0:
+                raise ValueError('Could not find evoked data')
+
+            # find string-based entry
+            if isinstance(condition, string_types):
+                if kind not in _aspect_dict.keys():
+                    raise ValueError('kind must be "average" or '
+                                     '"standard_error"')
+
+                comments, aspect_kinds, t = _get_entries(fid, evoked_node)
+                goods = np.logical_and(in1d(comments, [condition]),
+                                       in1d(aspect_kinds,
+                                            [_aspect_dict[kind]]))
+                found_cond = np.where(goods)[0]
+                if len(found_cond) != 1:
+                    raise ValueError('condition "%s" (%s) not found, out of '
+                                     'found datasets:\n  %s'
+                                     % (condition, kind, t))
+                condition = found_cond[0]
+
+            if condition >= len(evoked_node) or condition < 0:
                 fid.close()
-                raise ValueError('Local channel information was not found '
-                                 'when it was expected.')
-
-            if len(chs) != nchan:
-                fid.close()
-                raise ValueError('Number of channels and number of '
-                                 'channel definitions are different')
-
-            info['chs'] = chs
-            info['nchan'] = nchan
-            logger.info('    Found channel information in evoked data. '
-                        'nchan = %d' % nchan)
-            if sfreq > 0:
-                info['sfreq'] = sfreq
-
-        nsamp = last - first + 1
-        logger.info('    Found the data of interest:')
-        logger.info('        t = %10.2f ... %10.2f ms (%s)'
-                    % (1000 * first / info['sfreq'],
-                       1000 * last / info['sfreq'], comment))
-        if info['comps'] is not None:
-            logger.info('        %d CTF compensation matrices available'
-                        % len(info['comps']))
-
-        # Read the data in the aspect block
-        nave = 1
-        epoch = []
-        for k in range(my_aspect['nent']):
-            kind = my_aspect['directory'][k].kind
-            pos = my_aspect['directory'][k].pos
-            if kind == FIFF.FIFF_COMMENT:
-                tag = read_tag(fid, pos)
-                comment = tag.data
-            elif kind == FIFF.FIFF_ASPECT_KIND:
-                tag = read_tag(fid, pos)
-                aspect_kind = int(tag.data)
-            elif kind == FIFF.FIFF_NAVE:
-                tag = read_tag(fid, pos)
-                nave = int(tag.data)
-            elif kind == FIFF.FIFF_EPOCH:
-                tag = read_tag(fid, pos)
-                epoch.append(tag)
-
-        logger.info('        nave = %d - aspect type = %d'
-                    % (nave, aspect_kind))
-
-        nepoch = len(epoch)
-        if nepoch != 1 and nepoch != info['nchan']:
-            fid.close()
-            raise ValueError('Number of epoch tags is unreasonable '
-                             '(nepoch = %d nchan = %d)'
-                             % (nepoch, info['nchan']))
-
-        if nepoch == 1:
-            # Only one epoch
-            all_data = epoch[0].data.astype(np.float)
-            # May need a transpose if the number of channels is one
-            if all_data.shape[1] == 1 and info['nchan'] == 1:
-                all_data = all_data.T.astype(np.float)
-        else:
-            # Put the old style epochs together
-            all_data = np.concatenate([e.data[None, :] for e in epoch],
-                                      axis=0).astype(np.float)
-
-        if all_data.shape[1] != nsamp:
-            fid.close()
-            raise ValueError('Incorrect number of samples (%d instead of %d)'
-                             % (all_data.shape[1], nsamp))
+                raise ValueError('Data set selector out of range')
+
+            my_evoked = evoked_node[condition]
+
+            # Identify the aspects
+            aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
+            if len(aspects) > 1:
+                logger.info('Multiple aspects found. Taking first one.')
+            my_aspect = aspects[0]
+
+            # Now find the data in the evoked block
+            nchan = 0
+            sfreq = -1
+            chs = []
+            comment = None
+            for k in range(my_evoked['nent']):
+                my_kind = my_evoked['directory'][k].kind
+                pos = my_evoked['directory'][k].pos
+                if my_kind == FIFF.FIFF_COMMENT:
+                    tag = read_tag(fid, pos)
+                    comment = tag.data
+                elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
+                    tag = read_tag(fid, pos)
+                    first = int(tag.data)
+                elif my_kind == FIFF.FIFF_LAST_SAMPLE:
+                    tag = read_tag(fid, pos)
+                    last = int(tag.data)
+                elif my_kind == FIFF.FIFF_NCHAN:
+                    tag = read_tag(fid, pos)
+                    nchan = int(tag.data)
+                elif my_kind == FIFF.FIFF_SFREQ:
+                    tag = read_tag(fid, pos)
+                    sfreq = float(tag.data)
+                elif my_kind == FIFF.FIFF_CH_INFO:
+                    tag = read_tag(fid, pos)
+                    chs.append(tag.data)
+
+            if comment is None:
+                comment = 'No comment'
+
+            #   Local channel information?
+            if nchan > 0:
+                if len(chs) == 0:
+                    raise ValueError('Local channel information was not found '
+                                     'when it was expected.')
+
+                if len(chs) != nchan:
+                    raise ValueError('Number of channels and number of '
+                                     'channel definitions are different')
+
+                info['chs'] = chs
+                info['nchan'] = nchan
+                logger.info('    Found channel information in evoked data. '
+                            'nchan = %d' % nchan)
+                if sfreq > 0:
+                    info['sfreq'] = sfreq
+
+            nsamp = last - first + 1
+            logger.info('    Found the data of interest:')
+            logger.info('        t = %10.2f ... %10.2f ms (%s)'
+                        % (1000 * first / info['sfreq'],
+                           1000 * last / info['sfreq'], comment))
+            if info['comps'] is not None:
+                logger.info('        %d CTF compensation matrices available'
+                            % len(info['comps']))
+
+            # Read the data in the aspect block
+            nave = 1
+            epoch = []
+            for k in range(my_aspect['nent']):
+                kind = my_aspect['directory'][k].kind
+                pos = my_aspect['directory'][k].pos
+                if kind == FIFF.FIFF_COMMENT:
+                    tag = read_tag(fid, pos)
+                    comment = tag.data
+                elif kind == FIFF.FIFF_ASPECT_KIND:
+                    tag = read_tag(fid, pos)
+                    aspect_kind = int(tag.data)
+                elif kind == FIFF.FIFF_NAVE:
+                    tag = read_tag(fid, pos)
+                    nave = int(tag.data)
+                elif kind == FIFF.FIFF_EPOCH:
+                    tag = read_tag(fid, pos)
+                    epoch.append(tag)
+
+            logger.info('        nave = %d - aspect type = %d'
+                        % (nave, aspect_kind))
+
+            nepoch = len(epoch)
+            if nepoch != 1 and nepoch != info['nchan']:
+                raise ValueError('Number of epoch tags is unreasonable '
+                                 '(nepoch = %d nchan = %d)'
+                                 % (nepoch, info['nchan']))
+
+            if nepoch == 1:
+                # Only one epoch
+                all_data = epoch[0].data.astype(np.float)
+                # May need a transpose if the number of channels is one
+                if all_data.shape[1] == 1 and info['nchan'] == 1:
+                    all_data = all_data.T.astype(np.float)
+            else:
+                # Put the old style epochs together
+                all_data = np.concatenate([e.data[None, :] for e in epoch],
+                                          axis=0).astype(np.float)
+
+            if all_data.shape[1] != nsamp:
+                raise ValueError('Incorrect number of samples (%d instead of '
+                                 '%d)' % (all_data.shape[1], nsamp))
 
         # Calibrate
-        cals = np.array([info['chs'][k]['cal']
-                         * info['chs'][k].get('scale', 1.0)
+        cals = np.array([info['chs'][k]['cal'] *
+                         info['chs'][k].get('scale', 1.0)
                          for k in range(info['nchan'])])
         all_data *= cals[:, np.newaxis]
 
@@ -265,7 +259,7 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         # Put the rest together all together
         self.nave = nave
         self._aspect_kind = aspect_kind
-        self.kind = aspect_rev.get(str(self._aspect_kind), 'Unknown')
+        self.kind = _aspect_rev.get(str(self._aspect_kind), 'Unknown')
         self.first = first
         self.last = last
         self.comment = comment
@@ -274,14 +268,11 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         # bind info, proj, data to self so apply_proj can be used
         self.data = all_data
-        self.proj = False
         if proj:
             self.apply_proj()
         # Run baseline correction
         self.data = rescale(self.data, times, baseline, 'mean', copy=False)
 
-        fid.close()
-
     def save(self, fname):
         """Save dataset to file.
 
@@ -301,21 +292,28 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
     @property
     def ch_names(self):
+        """Channel names"""
         return self.info['ch_names']
 
-    def crop(self, tmin=None, tmax=None):
+    def crop(self, tmin=None, tmax=None, copy=False):
         """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        copy : bool
+            If False, the instance is cropped in place; if True, a
+            cropped copy is returned and the original is left unchanged.
         """
-        times = self.times
-        mask = np.ones(len(times), dtype=np.bool)
-        if tmin is not None:
-            mask = mask & (times >= tmin)
-        if tmax is not None:
-            mask = mask & (times <= tmax)
-        self.times = times[mask]
-        self.first = int(self.times[0] * self.info['sfreq'])
-        self.last = len(self.times) + self.first - 1
-        self.data = self.data[:, mask]
+        inst = self if not copy else self.copy()
+        mask = _time_mask(inst.times, tmin, tmax)
+        inst.times = inst.times[mask]
+        inst.first = int(inst.times[0] * inst.info['sfreq'])
+        inst.last = len(inst.times) + inst.first - 1
+        inst.data = inst.data[:, mask]
+        return inst
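A short sketch of the new ``copy`` semantics (``evoked`` stands for any
loaded Evoked instance)::

    # keep the original and work on a cropped copy
    evoked_early = evoked.crop(tmin=0.0, tmax=0.3, copy=True)

    # or crop in place (the default, copy=False)
    evoked.crop(tmin=0.0, tmax=0.3)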
 
     def shift_time(self, tshift, relative=True):
         """Shift time scale in evoked data
@@ -346,10 +344,14 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                                dtype=np.float) / sfreq
 
     def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
-             proj=False, xlim='tight', hline=None, units=None, scalings=None,
-             titles=None, axes=None):
+             xlim='tight', proj=False, hline=None, units=None, scalings=None,
+             titles=None, axes=None, gfp=False):
         """Plot evoked data as butterfly plots
 
+        Left-clicking a line shows the channel name. Selecting an area by
+        clicking and holding the left mouse button plots a topographic map
+        of the painted area.
+
         Note: If bad channels are not excluded they are shown in red.
 
         Parameters
@@ -364,7 +366,7 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         show : bool
             Call pyplot.show() at the end or not.
         ylim : dict
-            ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e6])
+            ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
             Valid keys are eeg, mag, grad
         xlim : 'tight' | tuple | None
             xlim for plots.
@@ -387,14 +389,17 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             The axes to plot to. If list, the list must be a list of Axes of
             the same length as the number of channel types. If instance of
             Axes, there must be only one channel type plotted.
+        gfp : bool | 'only'
+            Plot GFP in green if True or "only". If "only", then the individual
+            channel traces will not be shown.
         """
         return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
                            show=show, ylim=ylim, proj=proj, xlim=xlim,
                            hline=hline, units=units, scalings=scalings,
-                           titles=titles, axes=axes)
+                           titles=titles, axes=axes, gfp=gfp)
 
     def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
-                   clim=None, proj=False, xlim='tight', units=None,
+                   clim=None, xlim='tight', proj=False, units=None,
                    scalings=None, titles=None, axes=None, cmap='RdBu_r'):
         """Plot evoked data as images
 
@@ -439,24 +444,96 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                                  units=units, scalings=scalings,
                                  titles=titles, axes=axes, cmap=cmap)
 
-    def plot_topomap(self, times=None, ch_type='mag', layout=None, vmin=None,
-                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
+    def plot_topo(self, layout=None, layout_scale=0.945, color=None,
+                  border='none', ylim=None, scalings=None, title=None,
+                  proj=False, vline=[0.0], fig_facecolor='k',
+                  fig_background=None, axis_facecolor='k', font_color='w',
+                  show=True):
+        """Plot 2D topography of evoked responses.
+
+        Clicking on the plot of an individual sensor opens a new figure showing
+        the evoked response for the selected sensor.
+
+        Parameters
+        ----------
+        layout : instance of Layout | None
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct layout is
+            inferred from the data.
+        layout_scale : float
+            Scaling factor for adjusting the relative size of the layout
+            on the canvas
+        color : list of color objects | color object | None
+            Everything matplotlib accepts to specify colors. If not list-like,
+            the color specified will be repeated. If None, colors are
+            automatically drawn.
+        border : str
+            Matplotlib border style to be used for each sensor plot.
+        ylim : dict | None
+            ylim for plots. The value determines the upper and lower subplot
+            limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+            mag, grad, misc. If None, the ylim parameter for each channel is
+            determined by the maximum absolute peak.
+        scalings : dict | None
+            The scalings of the channel types to be applied for plotting. If
+            None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+        title : str
+            Title of the figure.
+        proj : bool | 'interactive'
+            If true SSP projections are applied before display. If
+            'interactive', a check box for reversible selection of SSP
+            projection vectors will be shown.
+        vline : list of floats | None
+            The values at which to show a vertical line.
+        fig_facecolor : str | obj
+            The figure face color. Defaults to black.
+        fig_background : None | numpy ndarray
+            A background image for the figure. This must work with a call to
+            plt.imshow. Defaults to None.
+        axis_facecolor : str | obj
+            The face color to be used for each sensor plot. Defaults to black.
+        font_color : str | obj
+            The color of text in the colorbar and title. Defaults to white.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : Instance of matplotlib.figure.Figure
+            Images of evoked responses at sensor locations
+
+        .. versionadded:: 0.10.0
+        """
+        return plot_evoked_topo(self, layout=layout, layout_scale=layout_scale,
+                                color=color, border=border, ylim=ylim,
+                                scalings=scalings, title=title, proj=proj,
+                                vline=vline, fig_facecolor=fig_facecolor,
+                                fig_background=fig_background,
+                                axis_facecolor=axis_facecolor,
+                                font_color=font_color, show=show)
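A minimal call sketch for the new method (``evoked`` stands for a loaded
Evoked instance; all other arguments keep their defaults)::

    fig = evoked.plot_topo(title='Auditory response')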
+
+    def plot_topomap(self, times="auto", ch_type=None, layout=None, vmin=None,
+                     vmax=None, cmap='RdBu_r', sensors=True, colorbar=True,
                      scale=None, scale_time=1e3, unit=None, res=64, size=1,
-                     format="%3.1f", time_format='%01d ms', proj=False,
+                     cbar_fmt="%3.1f", time_format='%01d ms', proj=False,
                      show=True, show_names=False, title=None, mask=None,
                      mask_params=None, outlines='head', contours=6,
-                     image_interp='bilinear'):
+                     image_interp='bilinear', average=None, head_pos=None,
+                     axes=None):
         """Plot topographic maps of specific time points
 
         Parameters
         ----------
-        times : float | array of floats | None.
-            The time point(s) to plot. If None, 10 topographies will be shown
-            will a regular time spacing between the first and last time
-            instant.
-        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        times : float | array of floats | "auto" | "peaks".
+            The time point(s) to plot. If "auto", the number of ``axes``
+            determines the amount of time point(s). If ``axes`` is also None,
+            10 topographies will be shown with a regular time spacing between
+            the first and last time instant. If "peaks", finds time points
+            automatically by checking for local maxima in Global Field Power.
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
             The channel type to plot. For 'grad', the gradiometers are
             collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
         layout : None | Layout
             Layout instance specifying sensor positions (does not need to
             be specified for Neuromag data). If possible, the correct
@@ -470,32 +547,33 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         vmax : float | callable
             The value specifying the upper bound of the color range.
             If None, the maximum absolute value is used. If vmin is None,
-            but vmax is not, defaults to np.min(data).
+            but vmax is not, defaults to np.max(data).
             If callable, the output equals vmax(data).
         cmap : matplotlib colormap
-            Colormap. Defaults to 'RdBu_r'
+            Colormap. Defaults to 'RdBu_r'.
         sensors : bool | str
             Add markers for sensor locations to the plot. Accepts matplotlib
-            plot format string (e.g., 'r+' for red plusses).
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
         colorbar : bool
             Plot a colorbar.
-        scale : float | None
+        scale : dict | float | None
             Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
             for grad and 1e15 for mag.
         scale_time : float | None
             Scale the time labels. Defaults to 1e3 (ms).
-        units : str | None
-            The units of the channel types used for colorbar lables. If
-            scale == None the unit is automatically determined.
+        unit : dict | str | None
+            The unit of the channel type used for the colorbar label. If
+            scale is None the unit is automatically determined.
         res : int
             The resolution of the topomap image (n pixels along each side).
         size : scalar
             Side length of the topomaps in inches (only applies when plotting
             multiple topomaps at a time).
-        format : str
+        cbar_fmt : str
             String format for colorbar values.
         time_format : str
-            String format for topomap values. Defaults to "%01d ms"
+            String format for topomap values. Defaults to ``"%01d ms"``.
         proj : bool | 'interactive'
             If True, SSP projections are applied before display. If
             'interactive', a check box for reversible selection of SSP
@@ -517,17 +595,40 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         mask_params : dict | None
             Additional plotting parameters for plotting significant sensors.
             Default (None) equals:
-            dict(marker='o', markerfacecolor='w', markeredgecolor='k',
-                 linewidth=0, markersize=4)
-        outlines : 'head' | dict | None
-            The outlines to be drawn. If 'head', a head scheme will be drawn.
-            If dict, each key refers to a tuple of x and y positions. The
-            values in 'mask_pos' will serve as image mask. If None,
-            nothing will be drawn. Defaults to 'head'.
+            ``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+            linewidth=0, markersize=4)``.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        contours : int | False | None
+            The number of contour lines to draw. If 0, no contours will be
+            drawn.
         image_interp : str
             The image interpolation to be used. All matplotlib options are
             accepted.
-
+        average : float | None
+            The time window around a given time to be used for averaging
+            (seconds). For example, 0.01 would translate into window that
+            starts 5 ms before and ends 5 ms after a given time point.
+            Defaults to None, which means no averaging.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as ``times`` (unless ``times`` is None). If
+            instance of Axes, ``times`` must be a float or a list of one float.
+            Defaults to None.
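+
+        Examples
+        --------
+        A minimal sketch, assuming ``evoked`` is a hypothetical Evoked
+        instance containing magnetometer data:
+
+        >>> # 'evoked' is assumed to exist; illustrative only
+        >>> evoked.plot_topomap(times="peaks", ch_type='mag')  # doctest: +SKIP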
         """
         return plot_evoked_topomap(self, times=times, ch_type=ch_type,
                                    layout=layout, vmin=vmin,
@@ -535,12 +636,14 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                                    colorbar=colorbar, scale=scale,
                                    scale_time=scale_time,
                                    unit=unit, res=res, proj=proj, size=size,
-                                   format=format, time_format=time_format,
+                                   cbar_fmt=cbar_fmt, time_format=time_format,
                                    show=show, show_names=show_names,
                                    title=title, mask=mask,
                                    mask_params=mask_params,
                                    outlines=outlines, contours=contours,
-                                   image_interp=image_interp)
+                                   image_interp=image_interp,
+                                   average=average, head_pos=head_pos,
+                                   axes=axes)
 
     def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
                    n_jobs=1):
@@ -566,102 +669,74 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         return plot_evoked_field(self, surf_maps, time=time,
                                  time_label=time_label, n_jobs=n_jobs)
 
-    def to_nitime(self, picks=None):
-        """Export Evoked object to NiTime
+    def plot_white(self, noise_cov, show=True):
+        """Plot whitened evoked response
+
+        Plots the whitened evoked response and the whitened GFP as described
+        in [1]_. If a single covariance object is passed, the GFP panel
+        (bottom) will depict different sensor types. If multiple covariance
+        objects are passed as a list, the left column will display the
+        whitened evoked responses for each channel based on the whitener
+        from the noise covariance that has the highest log-likelihood. The
+        right column will depict the whitened GFPs based on each estimator
+        separately for each sensor type. Instead of the number of channels,
+        the GFP display shows the estimated rank. The rank estimation will
+        be printed by the logger for each noise covariance estimator that is
+        passed.
+
         Parameters
         ----------
-        picks : array-like of int | None
-            Indices of channels to apply. If None, all channels will be
-            exported.
+        noise_cov : list | instance of Covariance | str
+            The noise covariance as computed by ``mne.cov.compute_covariance``.
+        show : bool
+            Whether to show the figure or not. Defaults to True.
 
         Returns
         -------
-        evoked_ts : instance of nitime.TimeSeries
-            The TimeSeries instance
-        """
-        try:
-            from nitime import TimeSeries  # to avoid strong dependency
-        except ImportError:
-            raise Exception('the nitime package is missing')
+        fig : instance of matplotlib.figure.Figure
+            The figure object containing the plot.
 
-        evoked_ts = TimeSeries(self.data if picks is None
-                               else self.data[picks],
-                               sampling_rate=self.info['sfreq'])
-        return evoked_ts
+        References
+        ----------
+        .. [1] Engemann D. and Gramfort A. (2015). Automated model selection
+               in covariance estimation and spatial whitening of MEG and EEG
+               signals. NeuroImage, 108, 328-342.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
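+
+        Examples
+        --------
+        A minimal sketch, assuming ``evoked`` and a matching ``noise_cov``
+        (as computed by ``mne.cov.compute_covariance``) are hypothetical,
+        pre-existing objects:
+
+        >>> # 'evoked' and 'noise_cov' are assumed to exist
+        >>> evoked.plot_white(noise_cov)  # doctest: +SKIP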
+        """
+        return _plot_evoked_white(self, noise_cov=noise_cov, scalings=None,
+                                  rank=None, show=show)
 
-    def as_data_frame(self, picks=None, scale_time=1e3, scalings=None,
-                      use_time_index=True, copy=True):
-        """Get the Evoked object as a Pandas DataFrame
+    def as_type(self, ch_type='grad', mode='fast'):
+        """Compute virtual evoked using interpolated fields in mag/grad channels.
 
-        Export data in tabular structure: each row corresponds to a time point,
-        and each column to a channel.
+        .. warning:: Using virtual evoked data to compute the inverse can
+            yield unexpected results. The virtual channels have `'_virtual'`
+            appended to their names to emphasize that the data contained in
+            them are interpolated.
 
         Parameters
         ----------
-        picks : array-like of int | None
-            If None all channels are kept, otherwise the channels indices in
-            picks are kept.
-        scale_time : float
-            Scaling to be applied to time units.
-        scalings : dict | None
-            Scaling to be applied to the channels picked. If None, defaults to
-            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)`.
-        use_time_index : bool
-            If False, times will be included as in the data table, else it will
-            be used as index object.
-        copy : bool
-            If true, evoked will be copied. Else data may be modified in place.
+        ch_type : str
+            The destination channel type. It can be 'mag' or 'grad'.
+        mode : str
+            Either `'accurate'` or `'fast'`, determines the quality of the
+            Legendre polynomial expansion used. `'fast'` should be sufficient
+            for most applications.
 
         Returns
         -------
-        df : instance of pandas.core.DataFrame
-            Evoked data exported into tabular data structure.
-        """
-
-        pd = _check_pandas_installed()
-
-        if picks is None:
-            picks = list(range(self.info['nchan']))
-        else:
-            if not in1d(picks, np.arange(len(self.ch_names))).all():
-                raise ValueError('At least one picked channel is not present '
-                                 'in this Evoked instance.')
-
-        data, times = self.data, self.times
+        evoked : instance of mne.Evoked
+            The transformed evoked object containing only virtual channels.
 
-        if copy is True:
-            data = data.copy()
-
-        types = [channel_type(self.info, idx) for idx in picks]
-        n_channel_types = 0
-        ch_types_used = []
-
-        scalings = _mutable_defaults(('scalings', scalings))[0]
-        for t in scalings.keys():
-            if t in types:
-                n_channel_types += 1
-                ch_types_used.append(t)
-
-        for t in ch_types_used:
-            scaling = scalings[t]
-            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
-            if len(idx) > 0:
-                data[idx] *= scaling
-
-        assert times.shape[0] == data.shape[1]
-        col_names = [self.ch_names[k] for k in picks]
-
-        df = pd.DataFrame(data.T, columns=col_names)
-        df.insert(0, 'time', times * scale_time)
-
-        if use_time_index is True:
-            if 'time' in df:
-                df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(record=True):
-                df.set_index('time', inplace=True)
-
-        return df
+        Notes
+        -----
+        .. versionadded:: 0.9.0
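+
+        Examples
+        --------
+        A minimal sketch, assuming ``evoked`` is a hypothetical Evoked
+        instance with MEG data:
+
+        >>> # 'evoked' is assumed to exist; illustrative only
+        >>> virtual = evoked.as_type('grad')  # doctest: +SKIP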
+        """
+        from .forward import _as_meg_type_evoked
+        return _as_meg_type_evoked(self, ch_type=ch_type, mode=mode)
 
     def resample(self, sfreq, npad=100, window='boxcar'):
         """Resample data
@@ -681,8 +756,8 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         self.data = resample(self.data, sfreq, o_sfreq, npad, -1, window)
         # adjust indirectly affected variables
         self.info['sfreq'] = sfreq
-        self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq
-                      + self.times[0])
+        self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq +
+                      self.times[0])
         self.first = int(self.times[0] * self.info['sfreq'])
         self.last = len(self.times) + self.first - 1
 
@@ -717,7 +792,7 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
     def __add__(self, evoked):
         """Add evoked taking into account number of epochs"""
-        out = merge_evoked([self, evoked])
+        out = combine_evoked([self, evoked])
         out.comment = self.comment + " + " + evoked.comment
         return out
 
@@ -725,8 +800,12 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         """Add evoked taking into account number of epochs"""
         this_evoked = deepcopy(evoked)
         this_evoked.data *= -1.
-        out = merge_evoked([self, this_evoked])
-        out.comment = self.comment + " - " + this_evoked.comment
+        out = combine_evoked([self, this_evoked])
+        if self.comment is None or this_evoked.comment is None:
+            warnings.warn('evoked.comment expects a string but is None')
+            out.comment = 'unknown'
+        else:
+            out.comment = self.comment + " - " + this_evoked.comment
         return out
 
     def __hash__(self):
@@ -775,8 +854,7 @@ class Evoked(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         elif ch_type is not None and ch_type not in types_used:
             raise ValueError('Channel type `{ch_type}` not found in this '
-                             'evoked object.'
-                              .format(ch_type=ch_type))
+                             'evoked object.'.format(ch_type=ch_type))
 
         elif len(types_used) > 1 and ch_type is None:
             raise RuntimeError('More than one sensor type found. `ch_type` '
@@ -827,6 +905,10 @@ class EvokedArray(Evoked):
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         Defaults to raw.verbose.
+
+    See Also
+    --------
+    EpochsArray, io.RawArray, create_info
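+
+    Examples
+    --------
+    A minimal sketch, assuming ``data`` is a hypothetical array of shape
+    (n_channels, n_samples) and ``info`` is a matching measurement info
+    (e.g., from ``create_info``):
+
+    >>> # 'data' and 'info' are assumed to exist
+    >>> evoked = EvokedArray(data, info, tmin=-0.2)  # doctest: +SKIP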
     """
 
     @verbose
@@ -841,29 +923,28 @@ class EvokedArray(Evoked):
                              'n_samples)')
 
         if len(info['ch_names']) != np.shape(data)[0]:
-            raise ValueError('Info and data must have same number of '
-                             'channels.')
+            raise ValueError('Info (%s) and data (%s) must have same number '
+                             'of channels.' % (len(info['ch_names']),
+                                               np.shape(data)[0]))
 
         self.data = data
 
         # XXX: this should use round and be tested
         self.first = int(tmin * info['sfreq'])
         self.last = self.first + np.shape(data)[-1] - 1
-        self.times = np.arange(self.first, self.last + 1, dtype=np.float)
-        self.times /= info['sfreq']
-
+        self.times = np.arange(self.first, self.last + 1,
+                               dtype=np.float) / info['sfreq']
         self.info = info
         self.nave = nave
         self.kind = kind
         self.comment = comment
-        self.proj = None
         self.picks = None
         self.verbose = verbose
         self._projector = None
         if self.kind == 'average':
-            self._aspect_kind = aspect_dict['average']
+            self._aspect_kind = _aspect_dict['average']
         else:
-            self._aspect_kind = aspect_dict['standard_error']
+            self._aspect_kind = _aspect_dict['standard_error']
 
 
 def _get_entries(fid, evoked_node):
@@ -890,7 +971,7 @@ def _get_entries(fid, evoked_node):
         fid.close()
         raise ValueError('Dataset names in FIF file '
                          'could not be found.')
-    t = [aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
+    t = [_aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
     t = ['"' + c + '" (' + tt + ')' for tt, c in zip(t, comments)]
     t = '  ' + '\n  '.join(t)
     return comments, aspect_kinds, t
@@ -905,22 +986,94 @@ def _get_evoked_node(fname):
     return evoked_node
 
 
-def merge_evoked(all_evoked):
-    """Merge/concat evoked data
+def grand_average(all_evoked, interpolate_bads=True):
+    """Make grand average of a list evoked data
+
+    The function interpolates bad channels based on `interpolate_bads`
+    parameter. If `interpolate_bads` is True, the grand average
+    file will contain good channels and the bad channels interpolated
+    from the good MEG/EEG channels.
+
+    The grand_average.nave attribute will be equal the number
+    of evoked datasets used to calculate the grand average.
+
+    Note: Grand average evoked shall not be used for source localization.
+
+    Parameters
+    ----------
+    all_evoked : list of Evoked data
+        The evoked datasets.
+    interpolate_bads : bool
+        If True, bad MEG and EEG channels are interpolated.
+
+    Returns
+    -------
+    grand_average : Evoked
+        The grand average data.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
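+
+    Examples
+    --------
+    A minimal sketch, assuming ``all_evk`` is a hypothetical list of
+    Evoked instances, one per subject:
+
+    >>> # 'all_evk' is assumed to exist
+    >>> ga = grand_average(all_evk)  # doctest: +SKIP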
+    """
+    # check if all elements in the given list are evoked data
+    if not all(isinstance(e, Evoked) for e in all_evoked):
+        raise ValueError("Not all the elements in list are evoked data")
+
+    # Copy channels to leave the original evoked datasets intact.
+    all_evoked = [e.copy() for e in all_evoked]
+
+    # Interpolates if necessary
+    if interpolate_bads:
+        all_evoked = [e.interpolate_bads() if len(e.info['bads']) > 0
+                      else e for e in all_evoked]
+
+    equalize_channels(all_evoked)  # apply equalize_channels
+    # make grand_average object using combine_evoked
+    grand_average = combine_evoked(all_evoked, weights='equal')
+    # change the grand_average.nave to the number of Evokeds
+    grand_average.nave = len(all_evoked)
+    # change comment field
+    grand_average.comment = "Grand average (n = %d)" % grand_average.nave
+    return grand_average
+
+
+def combine_evoked(all_evoked, weights='nave'):
+    """Merge evoked data by weighted addition
 
     Data should have the same channels and the same time instants.
+    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
 
     Parameters
     ----------
     all_evoked : list of Evoked
-        The evoked datasets
+        The evoked datasets.
+    weights : list of float | str
+        The weights to apply to the data of each evoked instance.
+        Can also be ``'nave'`` to weight according to evoked.nave,
+        or ``"equal"`` to use equal weighting (each weighted as ``1/N``).
 
     Returns
     -------
     evoked : Evoked
-        The merged evoked data
+        The new evoked data.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
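+
+    Examples
+    --------
+    A minimal sketch, assuming ``evk1`` and ``evk2`` are hypothetical
+    Evoked instances sharing channels and times; negative weights yield a
+    subtraction:
+
+    >>> # 'evk1' and 'evk2' are assumed to exist
+    >>> diff = combine_evoked([evk1, evk2], weights=[1, -1])  # doctest: +SKIP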
     """
-    evoked = deepcopy(all_evoked[0])
+    evoked = all_evoked[0].copy()
+    if isinstance(weights, string_types):
+        if weights not in ('nave', 'equal'):
+            raise ValueError('weights must be a list of float, or "nave" or '
+                             '"equal"')
+        if weights == 'nave':
+            weights = np.array([e.nave for e in all_evoked], float)
+            weights /= weights.sum()
+        else:  # == 'equal'
+            weights = [1. / len(all_evoked)] * len(all_evoked)
+    weights = np.array(weights, float)
+    if weights.ndim != 1 or weights.size != len(all_evoked):
+        raise ValueError('weights must be the same size as all_evoked')
 
     ch_names = evoked.ch_names
     for e in all_evoked[1:]:
@@ -928,68 +1081,20 @@ def merge_evoked(all_evoked):
                                                   "the same channels"
                                                   % (evoked, e))
         assert np.max(np.abs(e.times - evoked.times)) < 1e-7, \
-            ValueError("%s and %s do not contain the same time "
-                       "instants" % (evoked, e))
+            ValueError("%s and %s do not contain the same time instants"
+                       % (evoked, e))
 
     # use union of bad channels
     bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
                                                  for ev in all_evoked[1:])))
     evoked.info['bads'] = bads
 
-    all_nave = sum(e.nave for e in all_evoked)
-    evoked.data = sum(e.nave * e.data for e in all_evoked) / all_nave
-    evoked.nave = all_nave
+    evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
+    evoked.nave = max(int(1. / sum(w ** 2 / e.nave
+                                   for w, e in zip(weights, all_evoked))), 1)
     return evoked
 
 
-@deprecated("'read_evoked' will be removed in v0.9. Use 'read_evokeds.'")
-def read_evoked(fname, setno=None, baseline=None, kind='average', proj=True):
-    """Read an evoked dataset
-
-    Parameters
-    ----------
-    fname : string
-        The file name.
-    setno : int or str | list of int or str | None
-        The index or list of indices of the evoked dataset to read. FIF files
-        can contain multiple datasets. If None and there is only one dataset in
-        the file, this dataset is loaded.
-    baseline : None (default) or tuple of length 2
-        The time interval to apply baseline correction. If None do not apply it.
-        If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a
-        is None the beginning of the data is used and if b is None then b is set
-        to the end of the interval. If baseline is equal ot (None, None) all the
-        time interval is used.
-    kind : str
-        Either 'average' or 'standard_error', the type of data to read.
-    proj : bool
-        If False, available projectors won't be applied to the data.
-
-    Returns
-    -------
-    evoked : instance of Evoked or list of Evoked
-        The evoked datasets.
-    """
-    evoked_node = _get_evoked_node(fname)
-    if setno is None and len(evoked_node) > 1:
-        fid, _, _ = fiff_open(fname)
-        try:
-            _, _, t = _get_entries(fid, evoked_node)
-        except:
-            t = 'None found, must use integer'
-        else:
-            fid.close()
-        raise ValueError('%d datasets present, setno parameter must be set.'
-                         'Candidate setno names:\n%s' % (len(evoked_node), t))
-    elif isinstance(setno, list):
-        return [Evoked(fname, s, baseline=baseline, kind=kind, proj=proj)
-                for s in setno]
-    else:
-        if setno is None:
-            setno = 0
-        return Evoked(fname, setno, baseline=baseline, kind=kind, proj=proj)
-
-
 @verbose
 def read_evokeds(fname, condition=None, baseline=None, kind='average',
                  proj=True, verbose=None):
@@ -1004,11 +1109,11 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average',
         can contain multiple datasets. If None, all datasets are returned as a
         list.
     baseline : None (default) or tuple of length 2
-        The time interval to apply baseline correction. If None do not apply it.
-        If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a
-        is None the beginning of the data is used and if b is None then b is set
-        to the end of the interval. If baseline is equal ot (None, None) all the
-        time interval is used.
+        The time interval to apply baseline correction. If None do not apply
+        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used and if b is None then
+        b is set to the end of the interval. If baseline is equal to
+        (None, None) all the time interval is used.
     kind : str
         Either 'average' or 'standard_error', the type of data to read.
     proj : bool
@@ -1021,6 +1126,10 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average',
     evoked : Evoked (if condition is int or str) or list of Evoked (if
         condition is None or list)
         The evoked dataset(s).
+
+    See Also
+    --------
+    write_evokeds
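+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical and must end in
+    ``-ave.fif`` or ``-ave.fif.gz``:
+
+    >>> # 'sample-ave.fif' is assumed to exist
+    >>> evokeds = read_evokeds('sample-ave.fif')  # doctest: +SKIP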
     """
     check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
 
@@ -1038,67 +1147,6 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average',
     return out if return_list else out[0]
 
 
-@deprecated("'write_evoked' will be removed in v0.9. Use 'write_evokeds.'")
-def write_evoked(fname, evoked):
-    """Write an evoked dataset to a file
-
-    Parameters
-    ----------
-    fname : string
-        The file name.
-    evoked : instance of Evoked, or list of Evoked
-        The evoked dataset to save, or a list of evoked datasets to save in one
-        file. Note that the measurement info from the first evoked instance is
-        used, so be sure that information matches.
-    """
-
-    if not isinstance(evoked, list):
-        evoked = [evoked]
-
-    # Create the file and save the essentials
-    fid = start_file(fname)
-
-    start_block(fid, FIFF.FIFFB_MEAS)
-    write_id(fid, FIFF.FIFF_BLOCK_ID)
-    if evoked[0].info['meas_id'] is not None:
-        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
-
-    # Write measurement info
-    write_meas_info(fid, evoked[0].info)
-
-    # One or more evoked data sets
-    start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
-    for e in evoked:
-        start_block(fid, FIFF.FIFFB_EVOKED)
-
-        # Comment is optional
-        if e.comment is not None and len(e.comment) > 0:
-            write_string(fid, FIFF.FIFF_COMMENT, e.comment)
-
-        # First and last sample
-        write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
-        write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
-
-        # The epoch itself
-        start_block(fid, FIFF.FIFFB_ASPECT)
-
-        write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
-        write_int(fid, FIFF.FIFF_NAVE, e.nave)
-
-        decal = np.zeros((e.info['nchan'], 1))
-        for k in range(e.info['nchan']):
-            decal[k] = 1.0 / (e.info['chs'][k]['cal']
-                              * e.info['chs'][k].get('scale', 1.0))
-
-        write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
-        end_block(fid, FIFF.FIFFB_ASPECT)
-        end_block(fid, FIFF.FIFFB_EVOKED)
-
-    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
-    end_block(fid, FIFF.FIFFB_MEAS)
-    end_file(fid)
-
-
 def write_evokeds(fname, evoked):
     """Write an evoked dataset to a file
 
@@ -1110,6 +1158,10 @@ def write_evokeds(fname, evoked):
         The evoked dataset, or list of evoked datasets, to save in one file.
         Note that the measurement info from the first evoked instance is used,
         so be sure that information matches.
+
+    See Also
+    --------
+    read_evokeds
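+
+    Examples
+    --------
+    A minimal sketch, assuming ``evk1`` and ``evk2`` are hypothetical
+    Evoked instances with matching measurement info:
+
+    >>> # 'evk1' and 'evk2' are assumed to exist
+    >>> write_evokeds('group-ave.fif', [evk1, evk2])  # doctest: +SKIP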
     """
     check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
 
@@ -1148,8 +1200,8 @@ def write_evokeds(fname, evoked):
 
             decal = np.zeros((e.info['nchan'], 1))
             for k in range(e.info['nchan']):
-                decal[k] = 1.0 / (e.info['chs'][k]['cal']
-                                  * e.info['chs'][k].get('scale', 1.0))
+                decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
+                                  e.info['chs'][k].get('scale', 1.0))
 
             write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
             end_block(fid, FIFF.FIFFB_ASPECT)
@@ -1192,8 +1244,8 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
     modes = ('abs', 'neg', 'pos')
     if mode not in modes:
         raise ValueError('The `mode` parameter must be `{modes}`. You gave '
-                          'me `{mode}`'.format(modes='` or `'.join(modes),
-                                               mode=mode))
+                         'me `{mode}`'.format(modes='` or `'.join(modes),
+                                              mode=mode))
 
     if tmin is None:
         tmin = times[0]
diff --git a/mne/externals/FieldTrip.py b/mne/externals/FieldTrip.py
index 0ac21bc..66bb1d3 100644
--- a/mne/externals/FieldTrip.py
+++ b/mne/externals/FieldTrip.py
@@ -315,10 +315,10 @@ class Client:
                 offset += chunk_len
 
             if CHUNK_CHANNEL_NAMES in H.chunks:
-                L = H.chunks[CHUNK_CHANNEL_NAMES].split('\0')
+                L = H.chunks[CHUNK_CHANNEL_NAMES].split(b'\0')
                 numLab = len(L)
                 if numLab >= H.nChannels:
-                    H.labels = L[0:H.nChannels]
+                    H.labels = [x.decode('utf-8') for x in L[0:H.nChannels]]
 
         return H
 
diff --git a/mne/externals/__init__.py b/mne/externals/__init__.py
index 2dd0d4a..6f70ab7 100644
--- a/mne/externals/__init__.py
+++ b/mne/externals/__init__.py
@@ -1,4 +1,5 @@
 from . import six
 from . import jdcal
 from . import decorator
-from . import tempita
\ No newline at end of file
+from . import tempita
+from . import h5io
diff --git a/mne/externals/h5io/__init__.py b/mne/externals/h5io/__init__.py
new file mode 100644
index 0000000..ea54792
--- /dev/null
+++ b/mne/externals/h5io/__init__.py
@@ -0,0 +1,6 @@
+"""Python Objects Onto HDF5
+"""
+
+__version__ = '0.1.dev0'
+
+from ._h5io import read_hdf5, write_hdf5, _TempDir, object_diff  # noqa, analysis:ignore
diff --git a/mne/externals/h5io/_h5io.py b/mne/externals/h5io/_h5io.py
new file mode 100644
index 0000000..36dd9f7
--- /dev/null
+++ b/mne/externals/h5io/_h5io.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import sys
+import tempfile
+from shutil import rmtree
+from os import path as op
+
+import numpy as np
+try:
+    from scipy import sparse
+except ImportError:
+    sparse = None
+
+# Adapted from six
+PY3 = sys.version_info[0] == 3
+text_type = str if PY3 else unicode  # noqa
+string_types = str if PY3 else basestring  # noqa
+
+
+##############################################################################
+# WRITING
+
+def _check_h5py():
+    """Helper to check if h5py is installed"""
+    try:
+        import h5py
+    except ImportError:
+        raise ImportError('the h5py module is required to use HDF5 I/O')
+    return h5py
+
+
+def _create_titled_group(root, key, title):
+    """Helper to create a titled group in h5py"""
+    out = root.create_group(key)
+    out.attrs['TITLE'] = title
+    return out
+
+
+def _create_titled_dataset(root, key, title, data, comp_kw=None):
+    """Helper to create a titled dataset in h5py"""
+    comp_kw = {} if comp_kw is None else comp_kw
+    out = root.create_dataset(key, data=data, **comp_kw)
+    out.attrs['TITLE'] = title
+    return out
+
+
+def write_hdf5(fname, data, overwrite=False, compression=4,
+               title='h5io'):
+    """Write python object to HDF5 format using h5py
+
+    Parameters
+    ----------
+    fname : str
+        Filename to use.
+    data : object
+        Object to write. Can be of any of these types:
+            {ndarray, dict, list, tuple, int, float, str}
+        Note that dict objects must only have ``str`` keys.
+    overwrite : bool
+        If True, overwrite file (if it exists).
+    compression : int
+        Compression level to use (0-9) to compress data using gzip.
+    title : str
+        The top-level directory name to use. Typically it is useful to make
+        this your package name, e.g. ``'mnepython'``.
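+
+    Examples
+    --------
+    A minimal sketch, assuming h5py is installed; the file name is
+    hypothetical:
+
+    >>> write_hdf5('data.h5', dict(a=1), overwrite=True)  # doctest: +SKIP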
+    """
+    h5py = _check_h5py()
+    if op.isfile(fname) and not overwrite:
+        raise IOError('file "%s" exists, use overwrite=True to overwrite'
+                      % fname)
+    if not isinstance(title, string_types):
+        raise ValueError('title must be a string')
+    comp_kw = dict()
+    if compression > 0:
+        comp_kw = dict(compression='gzip', compression_opts=compression)
+    with h5py.File(fname, mode='w') as fid:
+        _triage_write(title, data, fid, comp_kw, str(type(data)))
+
+
+def _triage_write(key, value, root, comp_kw, where):
+    if isinstance(value, dict):
+        sub_root = _create_titled_group(root, key, 'dict')
+        for key, sub_value in value.items():
+            if not isinstance(key, string_types):
+                raise TypeError('All dict keys must be strings')
+            _triage_write('key_{0}'.format(key), sub_value, sub_root, comp_kw,
+                          where + '["%s"]' % key)
+    elif isinstance(value, (list, tuple)):
+        title = 'list' if isinstance(value, list) else 'tuple'
+        sub_root = _create_titled_group(root, key, title)
+        for vi, sub_value in enumerate(value):
+            _triage_write('idx_{0}'.format(vi), sub_value, sub_root, comp_kw,
+                          where + '[%s]' % vi)
+    elif isinstance(value, type(None)):
+        _create_titled_dataset(root, key, 'None', [False])
+    elif isinstance(value, (int, float)):
+        if isinstance(value, int):
+            title = 'int'
+        else:  # isinstance(value, float):
+            title = 'float'
+        _create_titled_dataset(root, key, title, np.atleast_1d(value))
+    elif isinstance(value, string_types):
+        if isinstance(value, text_type):  # unicode
+            value = np.fromstring(value.encode('utf-8'), np.uint8)
+            title = 'unicode'
+        else:
+            value = np.fromstring(value.encode('ASCII'), np.uint8)
+            title = 'ascii'
+        _create_titled_dataset(root, key, title, value, comp_kw)
+    elif isinstance(value, np.ndarray):
+        _create_titled_dataset(root, key, 'ndarray', value)
+    elif sparse is not None and isinstance(value, sparse.csc_matrix):
+        sub_root = _create_titled_group(root, key, 'csc_matrix')
+        _triage_write('data', value.data, sub_root, comp_kw,
+                      where + '.csc_matrix_data')
+        _triage_write('indices', value.indices, sub_root, comp_kw,
+                      where + '.csc_matrix_indices')
+        _triage_write('indptr', value.indptr, sub_root, comp_kw,
+                      where + '.csc_matrix_indptr')
+    else:
+        raise TypeError('unsupported type %s (in %s)' % (type(value), where))
+
+
+##############################################################################
+# READING
+
+def read_hdf5(fname, title='h5io'):
+    """Read python object from HDF5 format using h5py
+
+    Parameters
+    ----------
+    fname : str
+        File to load.
+    title : str
+        The top-level directory name to use. Typically it is useful to make
+        this your package name, e.g. ``'mnepython'``.
+
+    Returns
+    -------
+    data : object
+        The loaded data. Can be of any type supported by ``write_hdf5``.
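+
+    Examples
+    --------
+    A minimal sketch, reading back a hypothetical file produced by
+    ``write_hdf5``:
+
+    >>> data = read_hdf5('data.h5')  # doctest: +SKIP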
+    """
+    h5py = _check_h5py()
+    if not op.isfile(fname):
+        raise IOError('file "%s" not found' % fname)
+    if not isinstance(title, string_types):
+        raise ValueError('title must be a string')
+    with h5py.File(fname, mode='r') as fid:
+        if title not in fid.keys():
+            raise ValueError('no "%s" data found' % title)
+        data = _triage_read(fid[title])
+    return data
+
+
+def _triage_read(node):
+    h5py = _check_h5py()
+    type_str = node.attrs['TITLE']
+    if isinstance(type_str, bytes):
+        type_str = type_str.decode()
+    if isinstance(node, h5py.Group):
+        if type_str == 'dict':
+            data = dict()
+            for key, subnode in node.items():
+                data[key[4:]] = _triage_read(subnode)
+        elif type_str in ['list', 'tuple']:
+            data = list()
+            ii = 0
+            while True:
+                subnode = node.get('idx_{0}'.format(ii), None)
+                if subnode is None:
+                    break
+                data.append(_triage_read(subnode))
+                ii += 1
+            assert len(data) == ii
+            data = tuple(data) if type_str == 'tuple' else data
+            return data
+        elif type_str == 'csc_matrix':
+            if sparse is None:
+                raise RuntimeError('scipy must be installed to read this data')
+            data = sparse.csc_matrix((_triage_read(node['data']),
+                                      _triage_read(node['indices']),
+                                      _triage_read(node['indptr'])))
+        else:
+            raise NotImplementedError('Unknown group type: {0}'
+                                      ''.format(type_str))
+    elif type_str == 'ndarray':
+        data = np.array(node)
+    elif type_str in ('int', 'float'):
+        cast = int if type_str == 'int' else float
+        data = cast(np.array(node)[0])
+    elif type_str in ('unicode', 'ascii', 'str'):  # 'str' for backward compat
+        decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
+        cast = text_type if type_str == 'unicode' else str
+        data = cast(np.array(node).tostring().decode(decoder))
+    elif type_str == 'None':
+        data = None
+    else:
+        raise TypeError('Unknown node type: {0}'.format(type_str))
+    return data
+
+
+# ############################################################################
+# UTILITIES
+
+def _sort_keys(x):
+    """Sort and return keys of dict"""
+    keys = list(x.keys())  # note: not thread-safe
+    idx = np.argsort([str(k) for k in keys])
+    keys = [keys[ii] for ii in idx]
+    return keys
+
+
+def object_diff(a, b, pre=''):
+    """Compute all differences between two python variables
+
+    Parameters
+    ----------
+    a : object
+        Currently supported: dict, list, tuple, ndarray, int, str, bytes,
+        float.
+    b : object
+        Must be the same type as ``a``.
+    pre : str
+        String to prepend to each line.
+
+    Returns
+    -------
+    diffs : str
+        A string representation of the differences.
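+
+    Examples
+    --------
+    A minimal sketch; an empty string indicates no differences:
+
+    >>> object_diff(dict(a=1), dict(a=1))
+    ''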
+    """
+    out = ''
+    if type(a) != type(b):
+        out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
+    elif isinstance(a, dict):
+        k1s = _sort_keys(a)
+        k2s = _sort_keys(b)
+        m1 = set(k2s) - set(k1s)
+        if len(m1):
+            out += pre + ' x1 missing keys %s\n' % (m1)
+        for key in k1s:
+            if key not in k2s:
+                out += pre + ' x2 missing key %s\n' % key
+            else:
+                out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
+    elif isinstance(a, (list, tuple)):
+        if len(a) != len(b):
+            out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
+        else:
+            for xx1, xx2 in zip(a, b):
+                out += object_diff(xx1, xx2, pre='')
+    elif isinstance(a, (string_types, int, float, bytes)):
+        if a != b:
+            out += pre + ' value mismatch (%s, %s)\n' % (a, b)
+    elif a is None:
+        pass  # b must be None due to our type checking
+    elif isinstance(a, np.ndarray):
+        if not np.array_equal(a, b):
+            out += pre + ' array mismatch\n'
+    elif sparse is not None and sparse.isspmatrix(a):
+        # sparsity and sparse type of b vs a already checked above by type()
+        if b.shape != a.shape:
+            out += pre + (' sparse matrix a and b shape mismatch'
+                          '(%s vs %s)' % (a.shape, b.shape))
+        else:
+            c = a - b
+            c.eliminate_zeros()
+            if c.nnz > 0:
+                out += pre + (' sparse matrix a and b differ on %s '
+                              'elements' % c.nnz)
+    else:
+        raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
+    return out
+
+
+class _TempDir(str):
+    """Class for creating and auto-destroying temp dir
+
+    This is designed to be used with testing modules. Instances should be
+    defined inside test functions. Instances defined at module level cannot
+    guarantee proper destruction of the temporary directory.
+
+    When used at module level, the current use of the __del__() method for
+    cleanup can fail because the rmtree function may be cleaned up before this
+    object (an alternative could be using the atexit module instead).
+    """
+    def __new__(self):
+        new = str.__new__(self, tempfile.mkdtemp())
+        return new
+
+    def __init__(self):
+        self._path = self.__str__()
+
+    def __del__(self):
+        rmtree(self._path, ignore_errors=True)
diff --git a/mne/fiff/__init__.py b/mne/fiff/__init__.py
deleted file mode 100644
index 618dfe1..0000000
--- a/mne/fiff/__init__.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""FIF module for IO with .fif files"""
-
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
-#
-# License: BSD (3-clause)
-
-from ..utils import deprecated
-
-from ..io.open import fiff_open, show_fiff, _fiff_get_fid
-from ..evoked import (Evoked, read_evoked, write_evoked, read_evokeds,
-                      write_evokeds)
-from ..io.meas_info import read_fiducials, write_fiducials, read_info, write_info
-from ..io.pick import (pick_types, pick_channels, pick_types_evoked,
-                       pick_channels_regexp, pick_channels_forward,
-                       pick_types_forward, pick_channels_cov,
-                       pick_channels_evoked, pick_info, _has_kit_refs)
-
-from ..io.proj import proj_equal, make_eeg_average_ref_proj
-from ..cov import _read_cov, _write_cov
-from ..io import array
-from ..io import base
-from ..io import brainvision
-from ..io import bti
-from ..io import edf
-from ..io import egi
-from ..io import fiff
-from ..io import kit
-
-# for backward compatibility
-from ..io.fiff import RawFIFF
-from ..io.fiff import RawFIFF as Raw
-from ..io.base import concatenate_raws, get_chpi_positions, set_eeg_reference
-
-def _deprecate_io(obj, name):
-    return deprecated('Use mne.io.%s as mne.fiff.%s is deprecated and will be '
-                      'removed in v0.9.' % (name, name))(obj)
-
-def _deprecate_mne(obj, name):
-    return deprecated('Use mne.%s as mne.fiff.%s is deprecated and will be '
-                      'removed in v0.9.' % (name, name))(obj)
-
-
-# our decorator overwrites the class, so we need to wrap :(
-class Evoked(Evoked):
-    pass
-
-
-class Raw(Raw):
-    pass
-
-
-Evoked = _deprecate_io(Evoked, 'Evoked')
-Raw = _deprecate_io(Raw, 'Raw')
-read_evoked = _deprecate_io(read_evoked, 'read_evoked')
-read_evokeds = _deprecate_io(read_evokeds, 'read_evokeds')
-write_evoked = _deprecate_io(write_evoked, 'write_evoked')
-write_evokeds = _deprecate_io(write_evokeds, 'write_evokeds')
-read_fiducials = _deprecate_io(read_fiducials, 'read_fiducials')
-write_fiducials = _deprecate_io(write_fiducials, 'write_fiducials')
-read_info = _deprecate_io(read_info, 'read_info')
-write_info = _deprecate_io(write_info, 'write_info')
-proj_equal = _deprecate_io(proj_equal, 'proj_equal')
-make_eeg_average_ref_proj = _deprecate_io(make_eeg_average_ref_proj, 'make_eeg_average_ref_proj')
-read_cov = _deprecate_io(_read_cov, 'read_cov')
-write_cov = _deprecate_io(_write_cov, 'write_cov')
-concatenate_raws = _deprecate_io(concatenate_raws, 'concatenate_raws')
-get_chpi_positions = _deprecate_io(get_chpi_positions, 'get_chpi_positions')
-set_eeg_reference = _deprecate_io(set_eeg_reference, 'set_eeg_reference')
-
-pick_types = _deprecate_mne(pick_types, 'pick_types')
-pick_channels = _deprecate_mne(pick_channels, 'pick_channels')
-pick_types_evoked = _deprecate_mne(pick_types_evoked, 'pick_types_evoked')
-pick_channels_regexp = _deprecate_mne(pick_channels_regexp, 'pick_channels_regexp')
-pick_channels_forward = _deprecate_mne(pick_channels_forward, 'pick_channels_forward')
-pick_types_forward = _deprecate_mne(pick_types_forward, 'pick_types_forward')
-pick_channels_cov = _deprecate_mne(pick_channels_cov, 'pick_channels_cov')
-pick_channels_evoked = _deprecate_mne(pick_channels_evoked, 'pick_channels_evoked')
-pick_info = _deprecate_mne(pick_info, 'pick_info')
diff --git a/mne/filter.py b/mne/filter.py
index b7b6cfa..8e881d8 100644
--- a/mne/filter.py
+++ b/mne/filter.py
@@ -4,16 +4,14 @@ from .externals.six import string_types, integer_types
 import warnings
 import numpy as np
 from scipy.fftpack import fft, ifftshift, fftfreq
-from scipy.signal import freqz, iirdesign, iirfilter, filter_dict, get_window
-from scipy import signal, stats
 from copy import deepcopy
 
-from .fixes import firwin2, filtfilt  # back port for old scipy
+from .fixes import get_firwin2, get_filtfilt
 from .time_frequency.multitaper import dpss_windows, _mt_spectra
-from .parallel import parallel_func
+from .parallel import parallel_func, check_n_jobs
 from .cuda import (setup_cuda_fft_multiply_repeated, fft_multiply_repeated,
                    setup_cuda_fft_resample, fft_resample, _smart_pad)
-from .utils import logger, verbose, sum_squared
+from .utils import logger, verbose, sum_squared, check_version
 
 
 def is_power2(num):
@@ -29,8 +27,8 @@ def is_power2(num):
     b : bool
         True if num is a power of 2.
 
-    Example
-    -------
+    Examples
+    --------
     >>> is_power2(2 ** 3)
     True
     >>> is_power2(5)
@@ -45,11 +43,10 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
     """ Filter using overlap-add FFTs.
 
     Filters the signal x using a filter with the impulse response h.
-    If zero_phase==True, the amplitude response is scaled and the filter is
-    applied in forward and backward direction, resulting in a zero-phase
-    filter.
+    If zero_phase==True, the filter is applied twice, once in the forward
+    direction and once backward, resulting in a zero-phase filter.
 
-    WARNING: This operates on the data in-place.
+    .. warning:: This operates on the data in-place.
 
     Parameters
     ----------
@@ -79,23 +76,29 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
     # Extend the signal by mirroring the edges to reduce transient filter
     # response
     n_h = len(h)
-    n_edge = min(n_h, x.shape[1])
+    if n_h == 1:
+        return x * h ** 2 if zero_phase else x * h
+    if x.shape[1] < len(h):
+        raise ValueError('Overlap add should only be used for signals '
+                         'longer than the requested filter')
+    n_edge = max(min(n_h, x.shape[1]) - 1, 0)
 
-    n_x = x.shape[1] + 2 * n_edge - 2
+    n_x = x.shape[1] + 2 * n_edge
 
     # Determine FFT length to use
     if n_fft is None:
-        if n_x > n_h:
+        min_fft = 2 * n_h - 1
+        max_fft = n_x
+        if max_fft >= min_fft:
             n_tot = 2 * n_x if zero_phase else n_x
 
-            min_fft = 2 * n_h - 1
-            max_fft = n_x
-
             # cost function based on number of multiplications
             N = 2 ** np.arange(np.ceil(np.log2(min_fft)),
                                np.ceil(np.log2(max_fft)) + 1, dtype=int)
-            cost = (np.ceil(n_tot / (N - n_h + 1).astype(np.float))
-                    * N * (np.log2(N) + 1))
+            # if doing zero-phase, h needs to be thought of as ~ twice as long
+            n_h_cost = 2 * n_h - 1 if zero_phase else n_h
+            cost = (np.ceil(n_tot / (N - n_h_cost + 1).astype(np.float)) *
+                    N * (np.log2(N) + 1))
 
             # add a heuristic term to prevent too-long FFT's which are slow
             # (not predicted by mult. cost alone, 4e-5 exp. determined)
@@ -106,30 +109,29 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
             # Use only a single block
             n_fft = 2 ** int(np.ceil(np.log2(n_x + n_h - 1)))
 
-    if n_fft < 2 * n_h - 1:
-        raise ValueError('n_fft is too short, has to be at least '
-                         '"2 * len(h) - 1"')
+    if zero_phase and n_fft <= 2 * n_h - 1:
+        raise ValueError("n_fft is too short, has to be at least "
+                         "2 * len(h) - 1 if zero_phase == True")
+    elif not zero_phase and n_fft <= n_h:
+        raise ValueError("n_fft is too short, has to be at least "
+                         "len(h) if zero_phase == False")
 
     if not is_power2(n_fft):
         warnings.warn("FFT length is not a power of 2. Can be slower.")
 
     # Filter in frequency domain
-    h_fft = fft(np.r_[h, np.zeros(n_fft - n_h, dtype=h.dtype)])
+    h_fft = fft(np.concatenate([h, np.zeros(n_fft - n_h, dtype=h.dtype)]))
+    assert(len(h_fft) == n_fft)
 
     if zero_phase:
-        # We will apply the filter in forward and backward direction: Scale
-        # frequency response of the filter so that the shape of the amplitude
-        # response stays the same when it is applied twice
-
-        # be careful not to divide by too small numbers
-        idx = np.where(np.abs(h_fft) > 1e-6)
-        h_fft[idx] = h_fft[idx] / np.sqrt(np.abs(h_fft[idx]))
-
-    # Segment length for signal x
-    n_seg = n_fft - n_h + 1
-
-    # Number of segments (including fractional segments)
-    n_segments = int(np.ceil(n_x / float(n_seg)))
+        """Zero-phase filtering is now done in one pass by taking the squared
+        magnitude of h_fft. This gives equivalent results to the old two-pass
+        method but theoretically doubles the speed for long fft lengths. To
+        compensate for this, overlapping must be done both before and after
+        each segment. When zero_phase == False it only needs to be done after.
+        """
+        h_fft = (h_fft * h_fft.conj()).real
+        # equivalent to convolving h(t) and h(-t) in the time domain
 
     # Figure out if we should use CUDA
     n_jobs, cuda_dict, h_fft = setup_cuda_fft_multiply_repeated(n_jobs, h_fft)
@@ -137,13 +139,12 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
     # Process each row separately
     if n_jobs == 1:
         for p in picks:
-            x[p] = _1d_overlap_filter(x[p], h_fft, n_edge, n_fft, zero_phase,
-                                      n_segments, n_seg, cuda_dict)
+            x[p] = _1d_overlap_filter(x[p], h_fft, n_h, n_edge, zero_phase,
+                                      cuda_dict)
     else:
-        _check_njobs(n_jobs, can_be_cuda=True)
         parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs)
-        data_new = parallel(p_fun(x[p], h_fft, n_edge, n_fft, zero_phase,
-                                  n_segments, n_seg, cuda_dict)
+        data_new = parallel(p_fun(x[p], h_fft, n_h, n_edge, zero_phase,
+                                  cuda_dict)
                             for p in picks)
         for pp, p in enumerate(picks):
             x[p] = data_new[pp]
@@ -151,46 +152,60 @@ def _overlap_add_filter(x, h, n_fft=None, zero_phase=True, picks=None,
     return x
 
 
-def _1d_overlap_filter(x, h_fft, n_edge, n_fft, zero_phase, n_segments, n_seg,
-                       cuda_dict):
+def _1d_overlap_filter(x, h_fft, n_h, n_edge, zero_phase, cuda_dict):
     """Do one-dimensional overlap-add FFT FIR filtering"""
     # pad to reduce ringing
-    x_ext = _smart_pad(x, n_edge - 1)
+    if cuda_dict['use_cuda']:
+        n_fft = cuda_dict['x'].size  # account for CUDA's modification of h_fft
+    else:
+        n_fft = len(h_fft)
+    x_ext = _smart_pad(x, n_edge)
     n_x = len(x_ext)
-    filter_input = x_ext
-    x_filtered = np.zeros_like(filter_input)
-
-    for pass_no in list(range(2)) if zero_phase else list(range(1)):
-
-        if pass_no == 1:
-            # second pass: flip signal
-            filter_input = np.flipud(x_filtered)
-            x_filtered = np.zeros_like(x_ext)
-
-        for seg_idx in range(n_segments):
-            seg = filter_input[seg_idx * n_seg:(seg_idx + 1) * n_seg]
-            seg = np.r_[seg, np.zeros(n_fft - len(seg))]
-            prod = fft_multiply_repeated(h_fft, seg, cuda_dict)
-            if seg_idx * n_seg + n_fft < n_x:
-                x_filtered[seg_idx * n_seg:seg_idx * n_seg + n_fft] += prod
-            else:
-                # Last segment
-                x_filtered[seg_idx * n_seg:] += prod[:n_x - seg_idx * n_seg]
-
-    # Remove mirrored edges that we added
-    x_filtered = x_filtered[n_edge - 1:-n_edge + 1]
+    x_filtered = np.zeros_like(x_ext)
 
     if zero_phase:
-        # flip signal back
-        x_filtered = np.flipud(x_filtered)
+        # Segment length for signal x (convolving twice)
+        n_seg = n_fft - 2 * (n_h - 1) - 1
+
+        # Number of segments (including fractional segments)
+        n_segments = int(np.ceil(n_x / float(n_seg)))
 
+        # padding parameters to ensure filtering is done properly
+        pre_pad = n_h - 1
+        post_pad = n_fft - (n_h - 1)
+    else:
+        n_seg = n_fft - n_h + 1
+        n_segments = int(np.ceil(n_x / float(n_seg)))
+        pre_pad = 0
+        post_pad = n_fft
+
+    # Now the actual filtering step is identical for zero-phase (filtfilt-like)
+    # or single-pass
+    for seg_idx in range(n_segments):
+        start = seg_idx * n_seg
+        stop = (seg_idx + 1) * n_seg
+        seg = x_ext[start:stop]
+        seg = np.concatenate([np.zeros(pre_pad), seg,
+                              np.zeros(post_pad - len(seg))])
+
+        prod = fft_multiply_repeated(h_fft, seg, cuda_dict)
+
+        start_filt = max(0, start - pre_pad)
+        stop_filt = min(start - pre_pad + n_fft, n_x)
+        start_prod = max(0, pre_pad - start)
+        stop_prod = start_prod + stop_filt - start_filt
+        x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod]
+
+    # Remove mirrored edges that we added and cast
+    if n_edge > 0:
+        x_filtered = x_filtered[n_edge:-n_edge]
     x_filtered = x_filtered.astype(x.dtype)
     return x_filtered
 
 
 def _filter_attenuation(h, freq, gain):
     """Compute minimum attenuation at stop frequency"""
-
+    from scipy.signal import freqz
     _, filt_resp = freqz(h.ravel(), worN=np.pi * freq)
     filt_resp = np.abs(filt_resp)  # use amplitude response
     filt_resp /= np.max(filt_resp)
@@ -198,7 +213,6 @@ def _filter_attenuation(h, freq, gain):
     idx = np.argmax(filt_resp)
     att_db = -20 * np.log10(filt_resp[idx])
     att_freq = freq[idx]
-
     return att_db, att_freq
 
 
@@ -221,6 +235,9 @@ def _1d_fftmult_ext(x, B, extend_x, cuda_dict):
 
 def _prep_for_filtering(x, copy, picks=None):
     """Set up array as 2D for filtering ease"""
+    if x.dtype != np.float64:
+        raise TypeError("Arrays passed for filtering must have a dtype of "
+                        "np.float64")
     if copy is True:
         x = x.copy()
     orig_shape = x.shape
@@ -228,6 +245,15 @@ def _prep_for_filtering(x, copy, picks=None):
     x.shape = (np.prod(x.shape[:-1]), x.shape[-1])
     if picks is None:
         picks = np.arange(x.shape[0])
+    elif len(orig_shape) == 3:
+        n_epochs, n_channels, n_times = orig_shape
+        offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels),
+                           len(picks))
+        picks = np.tile(picks, n_epochs) + offset
+    elif len(orig_shape) > 3:
+        raise ValueError('picks argument is not supported for data with more'
+                         ' than three dimensions')
+
     return x, orig_shape, picks
 
 
@@ -272,6 +298,7 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
     xf : array
         x filtered.
     """
+    firwin2 = get_firwin2()
     # set up array for filtering, reshape to 2D, operate on last axis
     x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
 
@@ -282,6 +309,7 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
     freq = np.array(freq) / (Fs / 2.)
     gain = np.array(gain)
     filter_length = _get_filter_length(filter_length, Fs, len_x=x.shape[1])
+    n_jobs = check_n_jobs(n_jobs, allow_cuda=True)
 
     if filter_length is None or x.shape[1] <= filter_length:
         # Use direct FFT filtering for short signals
@@ -296,16 +324,16 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
 
         N = x.shape[1] + (extend_x is True)
 
-        H = firwin2(N, freq, gain)[np.newaxis, :]
+        h = firwin2(N, freq, gain)[np.newaxis, :]
 
-        att_db, att_freq = _filter_attenuation(H, freq, gain)
+        att_db, att_freq = _filter_attenuation(h, freq, gain)
         if att_db < min_att_db:
             att_freq *= Fs / 2
             warnings.warn('Attenuation at stop frequency %0.1fHz is only '
                           '%0.1fdB.' % (att_freq, att_db))
 
         # Make zero-phase filter function
-        B = np.abs(fft(H)).ravel()
+        B = np.abs(fft(h)).ravel()
 
         # Figure out if we should use CUDA
         n_jobs, cuda_dict, B = setup_cuda_fft_multiply_repeated(n_jobs, B)
@@ -314,7 +342,6 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
             for p in picks:
                 x[p] = _1d_fftmult_ext(x[p], B, extend_x, cuda_dict)
         else:
-            _check_njobs(n_jobs, can_be_cuda=True)
             parallel, p_fun, _ = parallel_func(_1d_fftmult_ext, n_jobs)
             data_new = parallel(p_fun(x[p], B, extend_x, cuda_dict)
                                 for p in picks)
@@ -329,9 +356,10 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
             # Gain at Nyquist freq: 1: make N EVEN, 0: make N ODD
             N += 1
 
-        H = firwin2(N, freq, gain)
+        # construct filter with gain resulting from forward-backward filtering
+        h = firwin2(N, freq, gain, window='hann')
 
-        att_db, att_freq = _filter_attenuation(H, freq, gain)
+        att_db, att_freq = _filter_attenuation(h, freq, gain)
         att_db += 6  # the filter is applied twice (zero phase)
         if att_db < min_att_db:
             att_freq *= Fs / 2
@@ -339,7 +367,10 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
                           '%0.1fdB. Increase filter_length for higher '
                           'attenuation.' % (att_freq, att_db))
 
-        x = _overlap_add_filter(x, H, zero_phase=True, picks=picks,
+        # reconstruct filter, this time with appropriate gain for fwd-bkwd
+        gain = np.sqrt(gain)
+        h = firwin2(N, freq, gain, window='hann')
+        x = _overlap_add_filter(x, h, zero_phase=True, picks=picks,
                                 n_jobs=n_jobs)
 
     x.shape = orig_shape
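
Because the zero-phase overlap-add path applies the FIR filter twice (forward
and backward), the net amplitude response is the squared magnitude of a single
pass, which is why the filter is rebuilt from np.sqrt(gain) above. A quick
numerical check of that reasoning (a sketch with illustrative band edges, not
part of the patch):

    import numpy as np
    from scipy.signal import firwin2, freqz

    freq = [0., 0.3, 0.6, 1.]
    gain = [1., 1., 0.25, 0.25]
    h = firwin2(101, freq, np.sqrt(gain))
    _, H = freqz(h, worN=np.pi * np.array([0.1, 0.8]))
    # after two passes the net amplitude is |H|**2, close to the requested gain
    print(np.round(np.abs(H) ** 2, 2))  # approximately [1.0, 0.25]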
@@ -348,7 +379,8 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1,
 
 def _check_coefficients(b, a):
     """Check for filter stability"""
-    z, p, k = signal.tf2zpk(b, a)
+    from scipy.signal import tf2zpk
+    z, p, k = tf2zpk(b, a)
     if np.any(np.abs(p) > 1.0):
         raise RuntimeError('Filter poles outside unit circle, filter will be '
                            'unstable. Consider using different filter '
@@ -358,13 +390,14 @@ def _check_coefficients(b, a):
 def _filtfilt(x, b, a, padlen, picks, n_jobs, copy):
     """Helper to more easily call filtfilt"""
     # set up array for filtering, reshape to 2D, operate on last axis
+    filtfilt = get_filtfilt()
+    n_jobs = check_n_jobs(n_jobs)
     x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
     _check_coefficients(b, a)
     if n_jobs == 1:
         for p in picks:
             x[p] = filtfilt(b, a, x[p], padlen=padlen)
     else:
-        _check_njobs(n_jobs)
         parallel, p_fun, _ = parallel_func(filtfilt, n_jobs)
         data_new = parallel(p_fun(b, a, x[p], padlen=padlen)
                             for p in picks)
@@ -376,9 +409,10 @@ def _filtfilt(x, b, a, padlen, picks, n_jobs, copy):
 
 def _estimate_ringing_samples(b, a):
     """Helper function for determining IIR padding"""
+    from scipy.signal import lfilter
     x = np.zeros(1000)
     x[0] = 1
-    h = signal.lfilter(b, a, x)
+    h = lfilter(b, a, x)
     return np.where(np.abs(h) > 0.001 * np.max(np.abs(h)))[0][-1]
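
The helper measures how long the impulse response stays above 0.1% of its
peak, which construct_iir_filter then uses as the default filtfilt padding.
A usage sketch with a standard scipy Butterworth design (illustrative cutoff):

    import numpy as np
    from scipy.signal import butter, lfilter

    b, a = butter(4, 0.1)  # 4th-order low-pass at 0.1 * Nyquist
    x = np.zeros(1000)
    x[0] = 1.
    h = lfilter(b, a, x)  # impulse response
    padlen = np.where(np.abs(h) > 0.001 * np.max(np.abs(h)))[0][-1]
    print(padlen)  # number of samples the filter keeps "ringing"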
 
 
@@ -413,6 +447,8 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
     f_stop : float or list of float
         Stop-band frequency (same size as f_pass). Not used if 'order' is
         specified in iir_params.
+    sfreq : float | None
+        The sample rate.
     btype : str
         Type of filter. Should be 'lowpass', 'highpass', or 'bandpass'
         (or analogous string representations known to scipy.signal).
@@ -466,7 +502,11 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
     >>> print((iir_params['b'], iir_params['a'], iir_params['padlen']))
     (array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]), [1, 0], 0)
 
-    """
+    """  # noqa
+    from scipy.signal import iirfilter, iirdesign
+    known_filters = ('bessel', 'butter', 'butterworth', 'cauer', 'cheby1',
+                     'cheby2', 'chebyshev1', 'chebyshev2', 'chebyshevi',
+                     'chebyshevii', 'ellip', 'elliptic')
     a = None
     b = None
     # if the filter has been designed, we're good to go
@@ -474,11 +514,11 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
         [b, a] = [iir_params['b'], iir_params['a']]
     else:
         # ensure we have a valid ftype
-        if not 'ftype' in iir_params:
+        if 'ftype' not in iir_params:
             raise RuntimeError("ftype must be an entry in iir_params if 'b' "
                                "and 'a' are not specified")
         ftype = iir_params['ftype']
-        if not ftype in filter_dict:
+        if ftype not in known_filters:
             raise RuntimeError('ftype must be one of the known filter types '
                                '(e.g., butter, cheby1, etc.), not '
                                '%s' % ftype)
@@ -491,7 +531,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
         else:
             # use gpass / gstop design
             Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)
-            if not 'gpass' in iir_params or not 'gstop' in iir_params:
+            if 'gpass' not in iir_params or 'gstop' not in iir_params:
                 raise ValueError("iir_params must have at least 'gstop' and"
                                  " 'gpass' (or 'N') entries")
             [b, a] = iirdesign(Wp, Ws, iir_params['gpass'],
@@ -501,7 +541,7 @@ def construct_iir_filter(iir_params=dict(b=[1, 0], a=[1, 0], padlen=0),
         raise RuntimeError('coefficients could not be created from iir_params')
 
     # now deal with padding
-    if not 'padlen' in iir_params:
+    if 'padlen' not in iir_params:
         padlen = _estimate_ringing_samples(b, a)
     else:
         padlen = iir_params['padlen']
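
construct_iir_filter thus supports two design paths: a fixed 'order' (via
scipy.signal.iirfilter) or a 'gpass'/'gstop' attenuation specification (via
scipy.signal.iirdesign). A sketch of the underlying scipy calls with
illustrative pass/stop edges:

    from scipy.signal import iirfilter, iirdesign

    sfreq = 1000.
    Wp = 40. / (sfreq / 2.)  # normalized pass-band edge
    Ws = 50. / (sfreq / 2.)  # normalized stop-band edge

    # order-based design
    b1, a1 = iirfilter(N=4, Wn=Wp, btype='lowpass', ftype='butter')
    # attenuation-based design: <= 1 dB passband ripple, >= 20 dB stopband loss
    b2, a2 = iirdesign(Wp, Ws, gpass=1., gstop=20., ftype='butter')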
@@ -524,8 +564,8 @@ def _check_method(method, iir_params, extra_types):
     if method == 'iir':
         if iir_params is None:
             iir_params = dict(order=4, ftype='butter')
-        if not isinstance(iir_params, dict) or 'ftype' not in iir_params:
-            raise ValueError('iir_params must be a dict with entry "ftype"')
+        if not isinstance(iir_params, dict):
+            raise ValueError('iir_params must be a dict')
     elif iir_params is not None:
         raise ValueError('iir_params must be None if method != "iir"')
     method = method.lower()
@@ -559,10 +599,13 @@ def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
     l_trans_bandwidth : float
         Width of the transition band at the low cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
     h_trans_bandwidth : float
         Width of the transition band at the high cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
     method : str
         'fft' will use overlap-add FIR filtering, 'iir' will use IIR
         forward-backward filtering (via filtfilt).
@@ -586,9 +629,14 @@ def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
     xf : array
         x filtered.
 
+    See Also
+    --------
+    low_pass_filter, high_pass_filter
+
     Notes
     -----
-    The frequency response is (approximately) given by
+    The frequency response is (approximately) given by::
+
                      ----------
                    /|         | \
                   / |         |  \
@@ -598,17 +646,18 @@ def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s',
                     |         |
               Fs1  Fp1       Fp2   Fs2
 
-    Where
-    Fs1 = Fp1 - l_trans_bandwidth in Hz
-    Fs2 = Fp2 + h_trans_bandwidth in Hz
+    Where:
+
+        Fs1 = Fp1 - l_trans_bandwidth in Hz
+        Fs2 = Fp2 + h_trans_bandwidth in Hz
     """
     iir_params = _check_method(method, iir_params, [])
 
     Fs = float(Fs)
     Fp1 = float(Fp1)
     Fp2 = float(Fp2)
-    Fs1 = Fp1 - l_trans_bandwidth
-    Fs2 = Fp2 + h_trans_bandwidth
+    Fs1 = Fp1 - l_trans_bandwidth if method == 'fft' else Fp1
+    Fs2 = Fp2 + h_trans_bandwidth if method == 'fft' else Fp2
     if Fs2 > Fs / 2:
         raise ValueError('Effective band-stop frequency (%s) is too high '
                          '(maximum based on Nyquist is %s)' % (Fs2, Fs / 2.))
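
A usage sketch for band_pass_filter on synthetic data (a minimal example,
assuming the function is imported from mne.filter as defined in this module):

    import numpy as np
    from mne.filter import band_pass_filter

    sfreq = 1000.
    t = np.arange(0, 10, 1. / sfreq)
    x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 60 * t)
    # keep the 10 Hz component, attenuate the 60 Hz one
    xf = band_pass_filter(x, Fs=sfreq, Fp1=5., Fp2=15.)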
@@ -659,10 +708,13 @@ def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
     l_trans_bandwidth : float
         Width of the transition band at the low cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
     h_trans_bandwidth : float
         Width of the transition band at the high cut-off frequency in Hz.
+        Not used if 'order' is specified in iir_params.
     method : str
         'fft' will use overlap-add FIR filtering, 'iir' will use IIR
         forward-backward filtering (via filtfilt).
@@ -688,7 +740,8 @@ def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
 
     Notes
     -----
-    The frequency response is (approximately) given by
+    The frequency response is (approximately) given by::
+
       ----------                   ----------
                |\                 /|
                | \               / |
@@ -698,9 +751,10 @@ def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
                |    |         |    |
               Fp1  Fs1       Fs2  Fp2
 
-    Where
-    Fs1 = Fp1 - l_trans_bandwidth in Hz
-    Fs2 = Fp2 + h_trans_bandwidth in Hz
+    Where:
+
+        Fs1 = Fp1 + l_trans_bandwidth in Hz
+        Fs2 = Fp2 - h_trans_bandwidth in Hz
 
     Note that multiple stop bands can be specified using arrays.
     """
@@ -714,8 +768,8 @@ def band_stop_filter(x, Fs, Fp1, Fp2, filter_length='10s',
     Fs = float(Fs)
     Fp1 = Fp1.astype(float)
     Fp2 = Fp2.astype(float)
-    Fs1 = Fp1 + l_trans_bandwidth
-    Fs2 = Fp2 - h_trans_bandwidth
+    Fs1 = Fp1 + l_trans_bandwidth if method == 'fft' else Fp1
+    Fs2 = Fp2 - h_trans_bandwidth if method == 'fft' else Fp2
 
     if np.any(Fs1 <= 0):
         raise ValueError('Filter specification invalid: Lower stop frequency '
@@ -767,8 +821,10 @@ def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
     trans_bandwidth : float
-        Width of the transition band in Hz.
+        Width of the transition band in Hz. Not used if 'order' is specified
+        in iir_params.
     method : str
         'fft' will use overlap-add FIR filtering, 'iir' will use IIR
         forward-backward filtering (via filtfilt).
@@ -792,9 +848,15 @@ def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
     xf : array
         x filtered.
 
+    See Also
+    --------
+    resample
+    band_pass_filter, high_pass_filter
+
     Notes
     -----
-    The frequency response is (approximately) given by
+    The frequency response is (approximately) given by::
+
       -------------------------
                               | \
                               |  \
@@ -808,7 +870,7 @@ def low_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
     iir_params = _check_method(method, iir_params, [])
     Fs = float(Fs)
     Fp = float(Fp)
-    Fstop = Fp + trans_bandwidth
+    Fstop = Fp + trans_bandwidth if method == 'fft' else Fp
     if Fstop > Fs / 2.:
         raise ValueError('Effective stop frequency (%s) is too high '
                          '(maximum based on Nyquist is %s)' % (Fstop, Fs / 2.))
@@ -850,8 +912,10 @@ def high_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
     trans_bandwidth : float
-        Width of the transition band in Hz.
+        Width of the transition band in Hz. Not used if 'order' is
+        specified in iir_params.
     method : str
         'fft' will use overlap-add FIR filtering, 'iir' will use IIR
         forward-backward filtering (via filtfilt).
@@ -875,25 +939,30 @@ def high_pass_filter(x, Fs, Fp, filter_length='10s', trans_bandwidth=0.5,
     xf : array
         x filtered.
 
+    See Also
+    --------
+    low_pass_filter, band_pass_filter
+
     Notes
     -----
-    The frequency response is (approximately) given by
-                   -----------------------
-                 /|
-                / |
-               /  |
-              /   |
-    ----------    |
-                  |
-           Fstop  Fp
-
-    where Fstop = Fp - trans_bandwidth
+    The frequency response is (approximately) given by::
+
+                       -----------------------
+                     /|
+                    / |
+                   /  |
+                  /   |
+        ----------    |
+                      |
+               Fstop  Fp
+
+    Where Fstop = Fp - trans_bandwidth.
     """
     iir_params = _check_method(method, iir_params, [])
     Fs = float(Fs)
     Fp = float(Fp)
 
-    Fstop = Fp - trans_bandwidth
+    Fstop = Fp - trans_bandwidth if method == 'fft' else Fp
     if Fstop <= 0:
         raise ValueError('Filter specification invalid: Stop frequency too low '
                          '(%0.1fHz). Increase Fp or reduce transition '
@@ -939,11 +1008,13 @@ def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
         used (faster for long signals). If str, a human-readable time in
         units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
         to the shortest power-of-two length at least that duration.
+        Not used for 'iir' filters.
     notch_widths : float | array of float | None
         Width of the stop band (centred at each freq in freqs) in Hz.
         If None, freqs / 200 is used.
     trans_bandwidth : float
-        Width of the transition band in Hz.
+        Width of the transition band in Hz. Not used if 'order' is
+        specified in iir_params.
     method : str
         'fft' will use overlap-add FIR filtering, 'iir' will use IIR
         forward-backward filtering (via filtfilt). 'spectrum_fit' will
@@ -980,7 +1051,8 @@ def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
 
     Notes
     -----
-    The frequency response is (approximately) given by
+    The frequency response is (approximately) given by::
+
       ----------         -----------
                |\       /|
                | \     / |
@@ -991,8 +1063,9 @@ def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
               Fp1 freq  Fp2
 
     For each freq in freqs, where:
-    Fp1 = freq - trans_bandwidth / 2 in Hz
-    Fs2 = freq + trans_bandwidth / 2 in Hz
+
+        Fp1 = freq - trans_bandwidth / 2 in Hz
+        Fp2 = freq + trans_bandwidth / 2 in Hz
 
     References
     ----------
@@ -1042,21 +1115,47 @@ def notch_filter(x, Fs, freqs, filter_length='10s', notch_widths=None,
 def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,
                       p_value, picks, n_jobs, copy):
     """Helper to more easily call _mt_spectrum_remove"""
+    from scipy import stats
     # set up array for filtering, reshape to 2D, operate on last axis
+    n_jobs = check_n_jobs(n_jobs)
     x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+
+    # XXX need to implement the moving window version for raw files
+    n_times = x.shape[1]
+
+    # max taper size chosen because it has a max error < 1e-3:
+    # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0]))
+    # 0.00099972447657578449
+    # so we use 1000 because it's the first "nice" number bigger than 953:
+    dpss_n_times_max = 1000
+
+    # figure out what tapers to use
+    if mt_bandwidth is not None:
+        half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
+    else:
+        half_nbw = 4
+
+    # compute dpss windows
+    n_tapers_max = int(2 * half_nbw)
+    window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
+                                       low_bias=False,
+                                       interp_from=min(n_times,
+                                                       dpss_n_times_max))
+    # F-stat of 1-p point
+    threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2)
+
     if n_jobs == 1:
         freq_list = list()
         for ii, x_ in enumerate(x):
             if ii in picks:
                 x[ii], f = _mt_spectrum_remove(x_, sfreq, line_freqs,
-                                               notch_widths, mt_bandwidth,
-                                               p_value)
+                                               notch_widths, window_fun,
+                                               threshold)
                 freq_list.append(f)
     else:
-        _check_njobs(n_jobs)
         parallel, p_fun, _ = parallel_func(_mt_spectrum_remove, n_jobs)
         data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths,
-                                  mt_bandwidth, p_value)
+                                  window_fun, threshold)
                             for xi, x_ in enumerate(x)
                             if xi in picks)
         freq_list = [d[1] for d in data_new]
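
The F-test above compares, at every frequency, the power explained by a
sinusoid against the residual across tapers, with 2 and 2 * n_tapers - 2
degrees of freedom; dividing p_value by n_times is a Bonferroni-style
correction. A sketch of the threshold computation with illustrative numbers:

    from scipy import stats

    n_times, p_value, n_tapers = 10000, 0.05, 8
    threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * n_tapers - 2)
    # any frequency whose F-statistic exceeds this is treated as a line component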
@@ -1068,7 +1167,7 @@ def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,
         if line_freqs is None:
             if len(rm_freqs) > 0:
                 logger.info('Detected notch frequencies:\n%s'
-                            % ', '.join([str(f) for f in rm_freqs]))
+                            % ', '.join([str(rm_f) for rm_f in rm_freqs]))
             else:
                logger.info('Detected notch frequencies:\nNone')
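
A usage sketch for the public entry point, notch_filter (a minimal example;
with freqs=None and method='spectrum_fit' the multitaper detection above is
used):

    import numpy as np
    from mne.filter import notch_filter

    sfreq = 1000.
    t = np.arange(0, 10, 1. / sfreq)
    x = np.random.randn(len(t)) + np.sin(2 * np.pi * 60 * t)
    # explicit notches at 60 Hz and its harmonics
    xf = notch_filter(x, Fs=sfreq, freqs=np.arange(60, 241, 60))
    # or let the multitaper F-test find the line frequencies
    xf2 = notch_filter(x, Fs=sfreq, freqs=None, method='spectrum_fit')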
 
@@ -1077,34 +1176,12 @@ def _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,
 
 
 def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
-                        mt_bandwidth, p_value):
+                        window_fun, threshold):
     """Use MT-spectrum to remove line frequencies
 
     Based on Chronux. If line_freqs is specified, all freqs within notch_width
     of each line_freq is set to zero.
     """
-    # XXX need to implement the moving window version for raw files
-    n_times = x.size
-
-    # max taper size chosen because it has an max error < 1e-3:
-    # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0]))
-    # 0.00099972447657578449
-    # so we use 1000 because it's the first "nice" number bigger than 953:
-    dpss_n_times_max = 1000
-
-    # figure out what tapers to use
-    if mt_bandwidth is not None:
-        half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
-    else:
-        half_nbw = 4
-
-    # compute dpss windows
-    n_tapers_max = int(2 * half_nbw)
-    window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
-                                       low_bias=False,
-                                       interp_from=min(n_times,
-                                                       dpss_n_times_max))
-
     # drop the even tapers
     n_tapers = len(window_fun)
     tapers_odd = np.arange(0, n_tapers, 2)
@@ -1118,7 +1195,7 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
     H0_sq = sum_squared(H0)
 
     # make "time" vector
-    rads = 2 * np.pi * (np.arange(n_times) / float(sfreq))
+    rads = 2 * np.pi * (np.arange(x.size) / float(sfreq))
 
     # compute mt_spectrum (returning n_ch, n_tapers, n_freq)
     x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq)
@@ -1137,14 +1214,12 @@ def _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,
         x_hat = A * H0[:, np.newaxis]
 
         # numerator for F-statistic
-        num = (n_tapers - 1) * (np.abs(A) ** 2) * H0_sq
+        num = (n_tapers - 1) * (A * A.conj()).real * H0_sq
         # denominator for F-statistic
         den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) +
                np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1))
         den[den == 0] = np.inf
         f_stat = num / den
-        # F-stat of 1-p point
-        threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * n_tapers - 2)
 
         # find frequencies to remove
         indices = np.where(f_stat > threshold)[1]
@@ -1220,6 +1295,7 @@ def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
     current implementation is functionally equivalent to passing
     up=up/down and down=1.
     """
+    from scipy.signal import get_window
     # check explicitly for backwards compatibility
     if not isinstance(axis, int):
         err = ("The axis parameter needs to be an integer (got %s). "
@@ -1229,6 +1305,7 @@ def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
         raise TypeError(err)
 
     # make sure our arithmetic will work
+    x = np.asanyarray(x)
     ratio = float(up) / down
     if axis < 0:
         axis = x.ndim + axis
@@ -1273,7 +1350,6 @@ def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
             y[xi] = fft_resample(x_, W, new_len, npad, to_remove,
                                  cuda_dict)
     else:
-        _check_njobs(n_jobs, can_be_cuda=True)
         parallel, p_fun, _ = parallel_func(fft_resample, n_jobs)
         y = parallel(p_fun(x_, W, new_len, npad, to_remove, cuda_dict)
                      for x_ in x_flat)
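
A usage sketch for resample (FFT-based, as implemented above; illustrative
shapes):

    import numpy as np
    from mne.filter import resample

    x = np.random.randn(4, 1000)  # e.g. 4 channels, 1000 samples
    y = resample(x, up=1., down=2.)  # downsample by a factor of 2
    print(y.shape)  # (4, 500)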
@@ -1287,6 +1363,61 @@ def resample(x, up, down, npad=100, axis=-1, window='boxcar', n_jobs=1,
     return y
 
 
+def _resample_stim_channels(stim_data, up, down):
+    """Resample stim channels, carefully.
+
+    Parameters
+    ----------
+    stim_data : 1D array, shape (n_samples,) |
+                2D array, shape (n_stim_channels, n_samples)
+        Stim channels to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+
+    Returns
+    -------
+    stim_resampled : 2D array, shape (n_stim_channels, n_samples_resampled)
+        The resampled stim channels
+
+    Notes
+    -----
+    The approach taken here is equivalent to the approach in the C-code.
+    See the decimate_stimch function in MNE/mne_browse_raw/save.c
+    """
+    stim_data = np.atleast_2d(stim_data)
+    n_stim_channels, n_samples = stim_data.shape
+
+    ratio = float(up) / down
+    resampled_n_samples = int(round(n_samples * ratio))
+
+    stim_resampled = np.zeros((n_stim_channels, resampled_n_samples))
+
+    # Figure out which points in old data to subsample protect against
+    # out-of-bounds, which can happen (having one sample more than
+    # expected) due to padding
+    sample_picks = np.minimum(
+        (np.arange(resampled_n_samples) / ratio).astype(int),
+        n_samples - 1
+    )
+
+    # Create windows starting from sample_picks[i], ending at sample_picks[i+1]
+    windows = zip(sample_picks, np.r_[sample_picks[1:], n_samples])
+
+    # Use the first non-zero value in each window
+    for window_i, window in enumerate(windows):
+        for stim_num, stim in enumerate(stim_data):
+            nonzero = stim[window[0]:window[1]].nonzero()[0]
+            if len(nonzero) > 0:
+                val = stim[window[0] + nonzero[0]]
+            else:
+                val = stim[window[0]]
+            stim_resampled[stim_num, window_i] = val
+
+    return stim_resampled
+
+
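
The window logic above keeps events that plain decimation would drop: each
output sample takes the first non-zero value in its source window. A small
sketch of that behavior, using the private helper just added (illustrative
values):

    import numpy as np

    stim = np.array([0, 0, 5, 0, 0, 0, 7, 0])
    print(_resample_stim_channels(stim, up=1., down=2.))
    # [[ 0.  5.  0.  7.]] -- both events survive 2x decimation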
 def detrend(x, order=1, axis=-1):
     """Detrend the array x.
 
@@ -1314,6 +1445,7 @@ def detrend(x, order=1, axis=-1):
         >>> (detrend(x) - noise).max() < 0.01
         True
     """
+    from scipy.signal import detrend
     if axis > len(x.shape):
         raise ValueError('x does not have %d axes' % axis)
     if order == 0:
@@ -1323,7 +1455,7 @@ def detrend(x, order=1, axis=-1):
     else:
         raise ValueError('order must be 0 or 1')
 
-    y = signal.detrend(x, axis=axis, type=fit)
+    y = detrend(x, axis=axis, type=fit)
 
     return y
 
@@ -1351,8 +1483,8 @@ def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
             raise ValueError('filter_length, if a string, must be a '
                              'human-readable time (e.g., "10s"), not '
                              '"%s"' % filter_length)
-        filter_length = 2 ** int(np.ceil(np.log2(filter_length
-                                                 * mult_fact * sfreq)))
+        filter_length = 2 ** int(np.ceil(np.log2(filter_length *
+                                                 mult_fact * sfreq)))
         # shouldn't make filter longer than length of x
         if filter_length >= len_x:
             filter_length = len_x
@@ -1369,11 +1501,71 @@ def _get_filter_length(filter_length, sfreq, min_length=128, len_x=np.inf):
     return filter_length
 
 
-def _check_njobs(n_jobs, can_be_cuda=False):
-    if not isinstance(n_jobs, int):
-        if can_be_cuda is True:
-            raise ValueError('n_jobs must be an integer, or "cuda"')
-        else:
-            raise ValueError('n_jobs must be an integer')
-    if n_jobs < 1:
-        raise ValueError('n_jobs must be >= 1')
+class FilterMixin(object):
+    """Object for Epoch/Evoked filtering"""
+
+    def savgol_filter(self, h_freq):
+        """Filter the data using Savitzky-Golay polynomial method
+
+        Parameters
+        ----------
+        h_freq : float
+            Approximate high cut-off frequency in Hz. Note that this
+            is not an exact cutoff, since Savitzky-Golay filtering [1]_ is
+            done using polynomial fits instead of FIR/IIR filtering.
+            This parameter is thus used to determine the length of the
+            window over which a 5th-order polynomial smoothing is used.
+
+        See Also
+        --------
+        mne.io.Raw.filter
+
+        Notes
+        -----
+        Data are modified in-place.
+
+        For Savitzky-Golay low-pass approximation, see:
+
+            https://gist.github.com/Eric89GXL/bbac101d50176611136b
+
+
+        .. versionadded:: 0.9.0
+
+        Examples
+        --------
+        >>> import mne
+        >>> from os import path as op
+        >>> evoked_fname = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis-ave.fif')  # doctest:+SKIP
+        >>> evoked = mne.read_evokeds(evoked_fname, baseline=(None, 0))[0]  # doctest:+SKIP
+        >>> evoked.savgol_filter(10.)  # low-pass at around 10 Hz # doctest:+SKIP
+        >>> evoked.plot()  # doctest:+SKIP
+
+        References
+        ----------
+        .. [1] Savitzky, A., Golay, M.J.E. (1964). "Smoothing and
+               Differentiation of Data by Simplified Least Squares
+               Procedures". Analytical Chemistry 36 (8): 1627-39.
+        """  # noqa
+        from .evoked import Evoked
+        from .epochs import _BaseEpochs
+        if isinstance(self, Evoked):
+            data = self.data
+            axis = 1
+        elif isinstance(self, _BaseEpochs):
+            if not self.preload:
+                raise RuntimeError('data must be preloaded to filter')
+            data = self._data
+            axis = 2
+
+        h_freq = float(h_freq)
+        if h_freq >= self.info['sfreq'] / 2.:
+            raise ValueError('h_freq must be less than half the sample rate')
+
+        # savitzky-golay filtering
+        if not check_version('scipy', '0.14'):
+            raise RuntimeError('scipy >= 0.14 must be installed for savgol')
+        from scipy.signal import savgol_filter
+        window_length = (int(np.round(self.info['sfreq'] /
+                                      h_freq)) // 2) * 2 + 1
+        data[...] = savgol_filter(data, axis=axis, polyorder=5,
+                                  window_length=window_length)
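
The window length above spans roughly one period of h_freq and is forced to
be odd, as scipy's savgol_filter requires. A sketch of the arithmetic with
illustrative numbers:

    import numpy as np

    sfreq, h_freq = 1000., 40.
    window_length = (int(np.round(sfreq / h_freq)) // 2) * 2 + 1
    print(window_length)  # 25: an odd window of about sfreq / h_freq samples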
diff --git a/mne/fixes.py b/mne/fixes.py
index 2b201bc..d8ceec7 100644
--- a/mne/fixes.py
+++ b/mne/fixes.py
@@ -12,6 +12,7 @@ at which the fix is no longer needed.
 #          Lars Buitinck <L.J.Buitinck at uva.nl>
 # License: BSD
 
+from __future__ import division
 import collections
 from operator import itemgetter
 import inspect
@@ -19,15 +20,13 @@ import inspect
 import warnings
 import numpy as np
 import scipy
-from scipy import linalg
+from scipy import linalg, sparse
 from math import ceil, log
 from numpy.fft import irfft
-from nose.tools import assert_true
-from scipy.signal import filtfilt as sp_filtfilt
 from distutils.version import LooseVersion
 from functools import partial
 from .externals import six
-from .externals.six.moves import copyreg
+from .externals.six.moves import copyreg, xrange
 from gzip import GzipFile
 
 
@@ -35,8 +34,6 @@ from gzip import GzipFile
 # Misc
 
 class gzip_open(GzipFile):  # python2.6 doesn't have context managing
-    def __init__(self, *args, **kwargs):
-        return GzipFile.__init__(self, *args, **kwargs)
 
     def __enter__(self):
         if hasattr(GzipFile, '__enter__'):
@@ -148,19 +145,40 @@ else:
     copysign = np.copysign
 
 
-def _in1d(ar1, ar2, assume_unique=False):
+def _in1d(ar1, ar2, assume_unique=False, invert=False):
     """Replacement for in1d that is provided for numpy >= 1.4"""
+    # Ravel both arrays, behavior for the first array could be different
+    ar1 = np.asarray(ar1).ravel()
+    ar2 = np.asarray(ar2).ravel()
+
+    # This code is significantly faster when the condition is satisfied.
+    if len(ar2) < 10 * len(ar1) ** 0.145:
+        if invert:
+            mask = np.ones(len(ar1), dtype=np.bool)
+            for a in ar2:
+                mask &= (ar1 != a)
+        else:
+            mask = np.zeros(len(ar1), dtype=np.bool)
+            for a in ar2:
+                mask |= (ar1 == a)
+        return mask
+
+    # Otherwise use sorting
     if not assume_unique:
         ar1, rev_idx = unique(ar1, return_inverse=True)
         ar2 = np.unique(ar2)
+
     ar = np.concatenate((ar1, ar2))
     # We need this to be a stable sort, so always use 'mergesort'
     # here. The values from the first array should always come before
     # the values from the second array.
     order = ar.argsort(kind='mergesort')
     sar = ar[order]
-    equal_adj = (sar[1:] == sar[:-1])
-    flag = np.concatenate((equal_adj, [False]))
+    if invert:
+        bool_ar = (sar[1:] != sar[:-1])
+    else:
+        bool_ar = (sar[1:] == sar[:-1])
+    flag = np.concatenate((bool_ar, [invert]))
     indx = order.argsort(kind='mergesort')[:len(ar1)]
 
     if assume_unique:
@@ -168,7 +186,8 @@ def _in1d(ar1, ar2, assume_unique=False):
     else:
         return flag[indx][rev_idx]
 
-if not hasattr(np, 'in1d'):
+
+if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
     in1d = _in1d
 else:
     in1d = np.in1d
@@ -283,19 +302,86 @@ else:
     safe_copy = np.copy
 
 
-# wrap filtfilt, excluding padding arguments
-def _filtfilt(*args, **kwargs):
-    # cut out filter args
-    if len(args) > 4:
-        args = args[:4]
-    if 'padlen' in kwargs:
-        del kwargs['padlen']
-    return sp_filtfilt(*args, **kwargs)
+def _meshgrid(*xi, **kwargs):
+    """
+    Return coordinate matrices from coordinate vectors.
+
+    Make N-D coordinate arrays for vectorized evaluations of
+    N-D scalar/vector fields over N-D grids, given
+    one-dimensional coordinate arrays x1, x2,..., xn.
+
+    .. versionchanged:: 1.9
+       1-D and 0-D cases are allowed.
+
+    Parameters
+    ----------
+    x1, x2,..., xn : array_like
+        1-D arrays representing the coordinates of a grid.
+    indexing : {'xy', 'ij'}, optional
+        Cartesian ('xy', default) or matrix ('ij') indexing of output.
+        See Notes for more details.
+        .. versionadded:: 1.7.0
+    sparse : bool, optional
+        If True a sparse grid is returned in order to conserve memory.
+        Default is False.
+        .. versionadded:: 1.7.0
+    copy : bool, optional
+        If False, a view into the original arrays are returned in order to
+        conserve memory.  Default is True.  Please note that
+        ``sparse=False, copy=False`` will likely return non-contiguous
+        arrays.  Furthermore, more than one element of a broadcast array
+        may refer to a single memory location.  If you need to write to the
+        arrays, make copies first.
+        .. versionadded:: 1.7.0
+    Returns
+    -------
+    X1, X2,..., XN : ndarray
+        For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
+        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
+        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
+        with the elements of `xi` repeated to fill the matrix along
+        the first dimension for `x1`, the second for `x2` and so on.
+    """
+    ndim = len(xi)
+
+    copy_ = kwargs.pop('copy', True)
+    sparse = kwargs.pop('sparse', False)
+    indexing = kwargs.pop('indexing', 'xy')
+
+    if kwargs:
+        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
+                        % (list(kwargs)[0],))
+
+    if indexing not in ['xy', 'ij']:
+        raise ValueError(
+            "Valid values for `indexing` are 'xy' and 'ij'.")
 
-if 'padlen' not in inspect.getargspec(sp_filtfilt)[0]:
-    filtfilt = _filtfilt
+    s0 = (1,) * ndim
+    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
+              for i, x in enumerate(xi)]
+
+    shape = [x.size for x in output]
+
+    if indexing == 'xy' and ndim > 1:
+        # switch first and second axis
+        output[0].shape = (1, -1) + (1,) * (ndim - 2)
+        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
+        shape[0], shape[1] = shape[1], shape[0]
+
+    if sparse:
+        if copy_:
+            return [x.copy() for x in output]
+        else:
+            return output
+    else:
+        # Return the full N-D matrix (not only the 1-D vector)
+        if copy_:
+            mult_fact = np.ones(shape, dtype=int)
+            return [x * mult_fact for x in output]
+        else:
+            return np.broadcast_arrays(*output)
+
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    meshgrid = _meshgrid
 else:
-    filtfilt = sp_filtfilt
+    meshgrid = np.meshgrid
 
 
 ###############################################################################
@@ -358,8 +444,8 @@ def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
     A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
     that decreases linearly on [0.5, 1.0] from 1 to 0:
 
-    >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
-    >>> print(taps[72:78])
+    >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])  # doctest: +SKIP
+    >>> print(taps[72:78])  # doctest: +SKIP
     [-0.02286961 -0.06362756  0.57310236  0.57310236 -0.06362756 -0.02286961]
 
     See also
@@ -449,10 +535,85 @@ def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
 
     return out
 
-if hasattr(scipy.signal, 'firwin2'):
-    from scipy.signal import firwin2
-else:
-    firwin2 = _firwin2
+
+def get_firwin2():
+    """Helper to get firwin2"""
+    try:
+        from scipy.signal import firwin2
+    except ImportError:
+        firwin2 = _firwin2
+    return firwin2
+
+
+def _filtfilt(*args, **kwargs):
+    """wrap filtfilt, excluding padding arguments"""
+    from scipy.signal import filtfilt
+    # cut out filter args
+    if len(args) > 4:
+        args = args[:4]
+    if 'padlen' in kwargs:
+        del kwargs['padlen']
+    return filtfilt(*args, **kwargs)
+
+
+def get_filtfilt():
+    """Helper to get filtfilt from scipy"""
+    from scipy.signal import filtfilt
+
+    if 'padlen' in inspect.getargspec(filtfilt)[0]:
+        return filtfilt
+
+    return _filtfilt
+
+
+def _get_argrelmax():
+    try:
+        from scipy.signal import argrelmax
+    except ImportError:
+        argrelmax = _argrelmax
+    return argrelmax
+
+
+def _argrelmax(data, axis=0, order=1, mode='clip'):
+    """Calculate the relative maxima of `data`.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative maxima.
+    axis : int, optional
+        Axis over which to select from `data`.  Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n, n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated.
+        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
+        as the same as the last (or first) element).
+        Default 'clip'.  See `numpy.take`.
+
+    Returns
+    -------
+    extrema : tuple of ndarrays
+        Indices of the maxima in arrays of integers.  ``extrema[k]`` is
+        the array of indices of axis `k` of `data`.  Note that the
+        return value is a tuple even when `data` is one-dimensional.
+    """
+    comparator = np.greater
+    if (int(order) != order) or (order < 1):
+        raise ValueError('Order must be an int >= 1')
+    datalen = data.shape[axis]
+    locs = np.arange(0, datalen)
+    results = np.ones(data.shape, dtype=bool)
+    main = data.take(locs, axis=axis, mode=mode)
+    for shift in xrange(1, order + 1):
+        plus = data.take(locs + shift, axis=axis, mode=mode)
+        minus = data.take(locs - shift, axis=axis, mode=mode)
+        results &= comparator(main, plus)
+        results &= comparator(main, minus)
+        if not results.any():
+            return results
+    return np.where(results)
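
A usage sketch of the fallback, matching scipy.signal.argrelmax semantics
(illustrative data):

    import numpy as np

    data = np.array([1, 3, 2, 5, 2, 1, 4, 2])
    print(_argrelmax(data))  # (array([1, 3, 6]),)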
 
 
 ###############################################################################
@@ -567,22 +728,161 @@ copyreg.pickle(partial, _reduce_partial)
 def normalize_colors(vmin, vmax, clip=False):
     """Helper to handle matplotlib API"""
     import matplotlib.pyplot as plt
-    if 'Normalize' in vars(plt):
+    try:
         return plt.Normalize(vmin, vmax, clip=clip)
-    else:
+    except AttributeError:
         return plt.normalize(vmin, vmax, clip=clip)
 
 
-def _assert_is(expr1, expr2, msg=None):
+def assert_true(expr, msg='False is not True'):
+    """Fake assert_true without message"""
+    if not expr:
+        raise AssertionError(msg)
+
+
+def assert_is(expr1, expr2, msg=None):
     """Fake assert_is without message"""
-    assert_true(expr2 is expr2)
+    assert_true(expr1 is expr2, msg)
+
 
-def _assert_is_not(expr1, expr2, msg=None):
+def assert_is_not(expr1, expr2, msg=None):
     """Fake assert_is_not without message"""
-    assert_true(expr2 is not expr2)
+    assert_true(expr1 is not expr2, msg)
+
+
+def _sparse_block_diag(mats, format=None, dtype=None):
+    """An implementation of scipy.sparse.block_diag since old versions of
+    scipy don't have it. Forms a sparse matrix by stacking matrices in block
+    diagonal form.
+
+    Parameters
+    ----------
+    mats : list of matrices
+        Input matrices.
+    format : str, optional
+        The sparse format of the result (e.g. "csr"). If not given, the
+        matrix is returned in "coo" format.
+    dtype : dtype specifier, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of blocks.
+
+    Returns
+    -------
+    res : sparse matrix
+    """
+    nmat = len(mats)
+    rows = []
+    for ia, a in enumerate(mats):
+        row = [None] * nmat
+        row[ia] = a
+        rows.append(row)
+    return sparse.bmat(rows, format=format, dtype=dtype)
 
 try:
-    from nose.tools import assert_is, assert_is_not
-except ImportError:
-    assert_is = _assert_is
-    assert_is_not = _assert_is_not
+    from scipy.sparse import block_diag as sparse_block_diag
+except Exception:
+    sparse_block_diag = _sparse_block_diag
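
A usage sketch of the resulting helper (scipy's block_diag when available,
the fallback above otherwise; assumes it is imported from mne.fixes, where it
is defined):

    import numpy as np
    from mne.fixes import sparse_block_diag

    C = sparse_block_diag([np.eye(2), 2 * np.eye(3)], format='csr')
    print(C.shape)  # (5, 5)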
+
+
+def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+    """
+    Returns a boolean array where two arrays are element-wise equal within a
+    tolerance.
+
+    The tolerance values are positive, typically very small numbers.  The
+    relative difference (`rtol` * abs(`b`)) and the absolute difference
+    `atol` are added together to compare against the absolute difference
+    between `a` and `b`.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    rtol : float
+        The relative tolerance parameter (see Notes).
+    atol : float
+        The absolute tolerance parameter (see Notes).
+    equal_nan : bool
+        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
+        considered equal to NaN's in `b` in the output array.
+
+    Returns
+    -------
+    y : array_like
+        Returns a boolean array of where `a` and `b` are equal within the
+        given tolerance. If both `a` and `b` are scalars, returns a single
+        boolean value.
+
+    See Also
+    --------
+    allclose
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    For finite values, isclose uses the following equation to test whether
+    two floating point values are equivalent.
+
+     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+    The above equation is not symmetric in `a` and `b`, so that
+    `isclose(a, b)` might be different from `isclose(b, a)` in
+    some rare cases.
+
+    Examples
+    --------
+    >>> isclose([1e10,1e-7], [1.00001e10,1e-8])
+    array([ True, False], dtype=bool)
+    >>> isclose([1e10,1e-8], [1.00001e10,1e-9])
+    array([ True,  True], dtype=bool)
+    >>> isclose([1e10,1e-8], [1.0001e10,1e-9])
+    array([False,  True], dtype=bool)
+    >>> isclose([1.0, np.nan], [1.0, np.nan])
+    array([ True, False], dtype=bool)
+    >>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+    array([ True,  True], dtype=bool)
+    """
+    def within_tol(x, y, atol, rtol):
+        with np.errstate(invalid='ignore'):
+            result = np.less_equal(abs(x - y), atol + rtol * abs(y))
+        if np.isscalar(a) and np.isscalar(b):
+            result = bool(result)
+        return result
+
+    x = np.array(a, copy=False, subok=True, ndmin=1)
+    y = np.array(b, copy=False, subok=True, ndmin=1)
+
+    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+    # This will cause casting of x later. Also, make sure to allow subclasses
+    # (e.g., for numpy.ma).
+    dt = np.core.multiarray.result_type(y, 1.)
+    y = np.array(y, dtype=dt, copy=False, subok=True)
+
+    xfin = np.isfinite(x)
+    yfin = np.isfinite(y)
+    if np.all(xfin) and np.all(yfin):
+        return within_tol(x, y, atol, rtol)
+    else:
+        finite = xfin & yfin
+        cond = np.zeros_like(finite, subok=True)
+        # Because we're using boolean indexing, x & y must be the same shape.
+        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
+        # lib.stride_tricks, though, so we can't import it here.
+        x = x * np.ones_like(cond)
+        y = y * np.ones_like(cond)
+        # Avoid subtraction with infinite/nan values...
+        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
+        # Check for equality of infinite values...
+        cond[~finite] = (x[~finite] == y[~finite])
+        if equal_nan:
+            # Make NaN == NaN
+            both_nan = np.isnan(x) & np.isnan(y)
+            cond[both_nan] = both_nan[both_nan]
+        return cond
+
+
+if LooseVersion(np.__version__) < LooseVersion('1.7'):
+    isclose = _isclose
+else:
+    isclose = np.isclose
diff --git a/mne/forward/__init__.py b/mne/forward/__init__.py
index 67292f5..b2da0c0 100644
--- a/mne/forward/__init__.py
+++ b/mne/forward/__init__.py
@@ -1,5 +1,5 @@
 from .forward import (Forward, read_forward_solution, write_forward_solution,
-                      is_fixed_orient, read_forward_meas_info,
+                      is_fixed_orient, _read_forward_meas_info,
                       write_forward_meas_info,
                       compute_orient_prior, compute_depth_prior,
                       apply_forward, apply_forward_raw,
@@ -8,7 +8,12 @@ from .forward import (Forward, read_forward_solution, write_forward_solution,
                       _restrict_gain_matrix, _stc_src_sel,
                       _fill_measurement_info, _apply_forward,
                       _subject_from_forward, convert_forward_solution,
-                      _to_fixed_ori, prepare_bem_model)
-from ._make_forward import make_forward_solution
-from ._field_interpolation import _make_surface_mapping, make_field_map
+                      _to_fixed_ori, prepare_bem_model, _merge_meg_eeg_fwds)
+from ._make_forward import (make_forward_solution, _prepare_for_forward,
+                            _prep_meg_channels, _prep_eeg_channels,
+                            _to_forward_dict, _create_meg_coils)
+from ._compute_forward import (_magnetic_dipole_field_vec, _compute_forwards,
+                               _concatenate_coils)
+from ._field_interpolation import (_make_surface_mapping, make_field_map,
+                                   _as_meg_type_evoked, _map_meg_channels)
 from . import _lead_dots  # for testing purposes
diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py
index d4b67da..583f0bb 100644
--- a/mne/forward/_compute_forward.py
+++ b/mne/forward/_compute_forward.py
@@ -1,9 +1,17 @@
+# -*- coding: utf-8 -*-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larsoner at uw.edu>
+#          Mark Wronkiewicz <wronk at uw.edu>
 #
 # License: BSD (3-clause)
+#
+# Many of the idealized equations behind these calculations can be found in:
+# 1) Realistic conductivity geometry model of the human head for interpretation
+#        of neuromagnetic data. Hamalainen and Sarvas, 1989. Specific to MNE
+# 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and
+#        Lewis, 1999. Generalized discussion of forward solutions.
 
 import numpy as np
 from copy import deepcopy
@@ -12,17 +20,17 @@ from ..surface import (fast_cross_3d, _find_nearest_tri_pt, _get_tri_supp_geom,
                        _triangle_coords)
 from ..io.constants import FIFF
 from ..transforms import apply_trans
-from ..utils import logger
+from ..utils import logger, verbose
 from ..parallel import parallel_func
 from ..io.compensator import get_current_comp, make_compensator
 from ..io.pick import pick_types
 
 
-##############################################################################
-# COIL SPECIFICATION
+# #############################################################################
+# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX
 
 def _dup_coil_set(coils, coord_frame, t):
-    """Make a duplicate"""
+    """Make a duplicate."""
     if t is not None and coord_frame != t['from']:
         raise RuntimeError('transformation frame does not match the coil set')
     coils = deepcopy(coils)
@@ -40,122 +48,215 @@ def _dup_coil_set(coils, coord_frame, t):
 
 
 def _check_coil_frame(coils, coord_frame, bem):
-    """Check to make sure the coils are in the correct coordinate frame"""
+    """Check to make sure the coils are in the correct coordinate frame."""
     if coord_frame != FIFF.FIFFV_COORD_MRI:
         if coord_frame == FIFF.FIFFV_COORD_HEAD:
             # Make a transformed duplicate
             coils, coord_frame = _dup_coil_set(coils, coord_frame,
                                                bem['head_mri_t'])
         else:
-            raise RuntimeError('Bad coil coordinate frame %d' % coord_frame)
+            raise RuntimeError('Bad coil coordinate frame %s' % coord_frame)
     return coils, coord_frame
 
 
-def _bem_lin_field_coeffs_simple(dest, normal, tri_rr, tri_nn, tri_area):
-    """Simple version..."""
-    out = np.zeros((3, len(dest)))
-    for rr, o in zip(tri_rr, out):
-        diff = dest - rr
-        dl = np.sum(diff * diff, axis=1)
-        x = fast_cross_3d(diff, tri_nn[np.newaxis, :])
-        o[:] = tri_area * np.sum(x * normal, axis=1) / (3.0 * dl * np.sqrt(dl))
-    return out
-
-
-def _lin_field_coeff(s, mult, rmags, cosmags, ws, counts, func, n_jobs):
-    """Use the linear field approximation to get field coefficients"""
+def _lin_field_coeff(surf, mult, rmags, cosmags, ws, n_int, n_jobs):
+    """Parallel wrapper for _do_lin_field_coeff to compute linear coefficients.
+
+    Parameters
+    ----------
+    surf : dict
+        Dict containing information for one surface of the BEM
+    mult : float
+        Multiplier for particular BEM surface (Iso Skull Approach discussed in
+        Mosher et al., 1999 and Hamalainen and Sarvas, 1989 Section III?)
+    rmags : ndarray, shape (n_integration_pts, 3)
+        3D positions of MEG coil integration points (from coil['rmag'])
+    cosmags : ndarray, shape (n_integration_pts, 3)
+        Direction of the MEG coil integration points (from coil['cosmag'])
+    ws : ndarray, shape (n_integration_pts,)
+        Weights for MEG coil integration points
+    n_int : ndarray, shape (n_MEG_sensors,)
+        Number of integration points for each MEG sensor
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    coeff : list
+        Linear coefficients with lead fields for each BEM vertex on each sensor
+        (?)
+    """
     parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs)
     nas = np.array_split
-    coeffs = parallel(p_fun(s['rr'], t, tn, ta,
-                            rmags, cosmags, ws, counts, func)
-                      for t, tn, ta in zip(nas(s['tris'], n_jobs),
-                                           nas(s['tri_nn'], n_jobs),
-                                           nas(s['tri_area'], n_jobs)))
+    coeffs = parallel(p_fun(surf['rr'], t, tn, ta, rmags, cosmags, ws, n_int)
+                      for t, tn, ta in zip(nas(surf['tris'], n_jobs),
+                                           nas(surf['tri_nn'], n_jobs),
+                                           nas(surf['tri_area'], n_jobs)))
     return mult * np.sum(coeffs, axis=0)
 
 
-def _do_lin_field_coeff(rr, t, tn, ta, rmags, cosmags, ws, counts, func):
-    """Actually get field coefficients (parallel-friendly)"""
-    coeff = np.zeros((len(counts), len(rr)))
-    bins = np.repeat(np.arange(len(counts)), counts)
-    for tri, tri_nn, tri_area in zip(t, tn, ta):
-        # Accumulate the coefficients for each triangle node
-        # and add to the corresponding coefficient matrix
-        tri_rr = rr[tri]
+def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, n_int):
+    """Compute field coefficients (parallel-friendly).
+
+    See section IV of Mosher et al., 1999 (specifically equation 35).
+
+    Parameters
+    ----------
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        Positions on one BEM surface in 3-space. 2562 BEM vertices for BEM with
+        5120 triangles (ico-4)
+    tris : ndarray, shape (n_triangles, 3)
+        Vertex indices for each triangle (referring to bem_rr)
+    tn : ndarray, shape (n_triangles, 3)
+        Triangle unit normal vectors
+    ta : ndarray, shape (n_triangles,)
+        Triangle areas
+    rmags : ndarray, shape (n_sensor_pts, 3)
+        3D positions of MEG coil integration points (from coil['rmag'])
+    cosmags : ndarray, shape (n_sensor_pts, 3)
+        Direction of the MEG coil integration points (from coil['cosmag'])
+    ws : ndarray, shape (n_sensor_pts,)
+        Weights for MEG coil integration points
+    n_int : ndarray, shape (n_MEG_sensors,)
+        Number of integration points for each MEG sensor
+
+    Returns
+    -------
+    coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices)
+        Linear coefficients with effect of each BEM vertex on each sensor (?)
+    """
+    coeff = np.zeros((len(n_int), len(bem_rr)))
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+    for tri, tri_nn, tri_area in zip(tris, tn, ta):
+        # Accumulate the coefficients for each triangle node and add to the
+        # corresponding coefficient matrix
+        tri_rr = bem_rr[tri]
 
         # The following is equivalent to:
-        #for j, coil in enumerate(coils['coils']):
-        #    x = func(coil['rmag'], coil['cosmag'],
-        #             tri_rr, tri_nn, tri_area)
-        #    res = np.sum(coil['w'][np.newaxis, :] * x, axis=1)
-        #    coeff[j][tri + off] += mult * res
-
-        xx = func(rmags, cosmags, tri_rr, tri_nn, tri_area)
-        # only loops 3x (one per direction)
-        zz = np.array([np.bincount(bins, weights=x * ws,
-                                   minlength=len(counts)) for x in xx])
-        coeff[:, tri] += zz.T
+        # for j, coil in enumerate(coils['coils']):
+        #     x = func(coil['rmag'], coil['cosmag'],
+        #              tri_rr, tri_nn, tri_area)
+        #     res = np.sum(coil['w'][np.newaxis, :] * x, axis=1)
+        #     coeff[j][tri + off] += mult * res
+
+        # Simple version (bem_lin_field_coeffs_simple)
+        zz = []
+        for trr in tri_rr:
+            diff = rmags - trr
+            dl = np.sum(diff * diff, axis=1)
+            c = fast_cross_3d(diff, tri_nn[np.newaxis, :])
+            x = tri_area * np.sum(c * cosmags, axis=1) / \
+                (3.0 * dl * np.sqrt(dl))
+            zz += [np.bincount(bins, weights=x * ws, minlength=len(n_int))]
+        coeff[:, tri] += np.array(zz).T
     return coeff
 
 
-def _bem_specify_coils(bem, coils, coord_frame, n_jobs):
-    """Set up for computing the solution at a set of coils"""
-    # Compute the weighting factors to obtain the magnetic field
-    # in the linear potential approximation
+def _concatenate_coils(coils):
+    """Helper to concatenate MEG coil parameters."""
+    rmags = np.concatenate([coil['rmag'] for coil in coils])
+    cosmags = np.concatenate([coil['cosmag'] for coil in coils])
+    ws = np.concatenate([coil['w'] for coil in coils])
+    n_int = np.array([len(coil['rmag']) for coil in coils])
+    return rmags, cosmags, ws, n_int
+
+
+def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs):
+    """Set up for computing the solution at a set of MEG coils.
+
+    Parameters
+    ----------
+    bem : dict
+        BEM information
+    coils : list of dict, len(n_MEG_sensors)
+        MEG sensor information dicts
+    coord_frame : int
+        Class constant identifying coordinate frame
+    mults : ndarray, shape (1, n_BEM_vertices)
+        Multiplier for every vertex in BEM
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    sol : ndarray, shape (n_MEG_sensors, n_BEM_vertices)
+        MEG solution
+    """
+    # Make sure MEG coils are in MRI coordinate frame to match BEM coords
     coils, coord_frame = _check_coil_frame(coils, coord_frame, bem)
 
     # leaving this in in case we want to easily add in the future
-    #if method != 'simple':  # in ['ferguson', 'urankar']:
-    #    raise NotImplementedError
-    #else:
-    func = _bem_lin_field_coeffs_simple
+    # if method != 'simple':  # in ['ferguson', 'urankar']:
+    #     raise NotImplementedError
 
-    # Process each of the surfaces
-    rmags = np.concatenate([coil['rmag'] for coil in coils])
-    cosmags = np.concatenate([coil['cosmag'] for coil in coils])
-    counts = np.array([len(coil['rmag']) for coil in coils])
-    ws = np.concatenate([coil['w'] for coil in coils])
+    # Compute the weighting factors to obtain the magnetic field in the linear
+    # potential approximation
 
+    # Process each of the surfaces
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
     lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]])
-    coeff = np.empty((len(counts), lens[-1]))
+    coeff = np.empty((len(n_int), lens[-1]))  # shape(n_coils, n_BEM_verts)
+
+    # Compute coeffs for each surface, one at a time
     for o1, o2, surf, mult in zip(lens[:-1], lens[1:],
                                   bem['surfs'], bem['field_mult']):
-        coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags,
-                                           ws, counts, func, n_jobs)
+        coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags, ws,
+                                           n_int, n_jobs)
     # put through the bem
     sol = np.dot(coeff, bem['solution'])
+    sol *= mults
     return sol
 
 
-def _bem_specify_els(bem, els):
-    """Set up for computing the solution at a set of electrodes"""
+def _bem_specify_els(bem, els, mults):
+    """Set up for computing the solution at a set of EEG electrodes.
+
+    Parameters
+    ----------
+    bem : dict
+        BEM information
+    els : list of dict, len(n_EEG_sensors)
+        List of EEG sensor information dicts
+    mults : ndarray, shape (1, n_BEM_vertices)
+        Multiplier for every vertex in BEM
+
+    Returns
+    -------
+    sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices)
+        EEG solution
+    """
     sol = np.zeros((len(els), bem['solution'].shape[1]))
-    # Go through all coils
     scalp = bem['surfs'][0]
+    # Get supplementary geometry information for tris and rr
     scalp['geom'] = _get_tri_supp_geom(scalp['tris'], scalp['rr'])
-    inds = np.arange(len(scalp['tris']))
+    inds = np.arange(len(scalp['tris']))  # Indices of every scalp triangle
 
+    # Iterate over all electrodes
     # In principle this could be parallelized, but pickling overhead is huge
     # (makes it slower than non-parallel)
     for k, el in enumerate(els):
-        # Go through all 'integration points'
+        # Transform electrode positions into MRI coordinates
         el_r = apply_trans(bem['head_mri_t']['trans'], el['rmag'])
+        # Iterate over all integration points
         for elw, r in zip(el['w'], el_r):
+            # Get index of closest tri on scalp BEM to electrode position
             best = _find_nearest_tri_pt(inds, r, scalp['geom'], True)[2]
             # Calculate a linear interpolation between the vertex values
-            tri = scalp['tris'][best]
+            tri = scalp['tris'][best]  # Get 3 vertex indices of closest tri
+            # Get coords of pt projected onto closest triangle
             x, y, z = _triangle_coords(r, scalp['geom'], best)
             w = elw * np.array([(1.0 - x - y), x, y])
             amt = np.dot(w, bem['solution'][tri])
             sol[k] += amt
+    sol *= mults
     return sol
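+
+# Interpolation sketch: each electrode potential is a barycentric (linear)
+# interpolation over the three vertices of the nearest scalp triangle. With
+# triangle coordinates (x, y) of the projected point, the weights sum to the
+# integration weight ``elw``:
+#
+#     >>> x, y, elw = 0.25, 0.25, 1.0  # toy values
+#     >>> w = elw * np.array([1.0 - x - y, x, y])
+#     >>> float(w.sum())
+#     1.0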
 
 
-#############################################################################
-# FORWARD COMPUTATION
+# #############################################################################
+# COMPENSATION
 
 def _make_ctf_comp_coils(info, coils):
-    """Get the correct compensator for CTF coils"""
+    """Get the correct compensator for CTF coils."""
     # adapted from mne_make_ctf_comp() from mne_ctf_comp.c
     logger.info('Setting up compensation data...')
     comp_num = get_current_comp(info)
@@ -178,172 +279,585 @@ def _make_ctf_comp_coils(info, coils):
     return compensator
 
 
-#def _bem_inf_pot(rd, Q, rp):
-#    """The infinite medium potential in one direction"""
-#    # NOTE: the (4.0 * np.pi) that was in the denominator has been moved!
-#    diff = rp - rd
-#    diff2 = np.sum(diff * diff, axis=1)
-#    return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2))
+# #############################################################################
+# BEM COMPUTATION
+
+_MAG_FACTOR = 1e-7  # μ_0 / (4π)
+
+# def _bem_inf_pot(rd, Q, rp):
+#     """The infinite medium potential in one direction. See Eq. (8) in
+#     Mosher, 1999"""
+#     NOTE: the (μ_0 / (4π) factor has been moved to _prep_field_communication
+#     diff = rp - rd  # (Observation point position) - (Source position)
+#     diff2 = np.sum(diff * diff, axis=1)  # Squared magnitude of diff
+#     # (Dipole moment) dot (diff) / (magnitude ^ 3)
+#     return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2))
+
 
+def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None):
+    """Compute the infinite medium potential in all 3 directions.
 
-def _bem_inf_pots(rr, surf_rr, Q=None):
-    """The infinite medium potential in all 3 directions"""
-    # NOTE: the (4.0 * np.pi) that was in the denominator has been moved!
-    diff = surf_rr.T[np.newaxis, :, :] - rr[:, :, np.newaxis]  # n_rr, 3, n_bem
+    Parameters
+    ----------
+    mri_rr : ndarray, shape (n_dipole_vertices, 3)
+        Chunk of 3D dipole positions in MRI coordinates
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for one BEM surface
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+
+    Returns
+    -------
+    ndarray, shape (n_dipole_vertices, 3, n_BEM_vertices)
+    """
+    # NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation
+    # Get position difference vector between BEM vertex and dipole
+    diff = bem_rr.T[np.newaxis, :, :] - mri_rr[:, :, np.newaxis]
     diff_norm = np.sum(diff * diff, axis=1)
-    diff_norm *= np.sqrt(diff_norm)
+    diff_norm *= np.sqrt(diff_norm)  # Position difference magnitude cubed
     diff_norm[diff_norm == 0] = 1  # avoid nans
-    if Q is None:  # save time when Q=np.eye(3) (e.g., MEG sensors)
+    if mri_Q is None:  # save time when mri_Q=np.eye(3) (e.g., MEG sensors)
         return diff / diff_norm[:, np.newaxis, :]
     else:  # get components in each direction (e.g., EEG sensors)
-        return np.einsum('ijk,mj->imk', diff, Q) / diff_norm[:, np.newaxis, :]
+        ein = np.einsum('ijk,mj->imk', diff, mri_Q)
+        return ein / diff_norm[:, np.newaxis, :]
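+
+# Worked toy example (sketch): a dipole at the origin and a single BEM vertex
+# 2 m away along z gives diff / |diff|**3 = 2 / 8 = 0.25 in the z component
+# (the μ_0 / (4π) factor is applied later):
+#
+#     >>> v0 = _bem_inf_pots(np.zeros((1, 3)), np.array([[0., 0., 2.]]))
+#     >>> v0.shape, float(v0[0, 2, 0])
+#     ((1, 3, 1), 0.25)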
 
 
 # This function has been refactored to process all points simultaneously
-#def _bem_inf_field(rd, Q, rp, d):
-#    """Infinite-medium magnetic field"""
-#    diff = rp - rd
-#    diff2 = np.sum(diff * diff, axis=1)
-#    x = fast_cross_3d(Q[np.newaxis, :], diff)
-#    return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2))
-
-
-def _bem_inf_fields(rr, rp, c):
-    """Infinite-medium magnetic field in all 3 basis directions"""
-    # Knowing that we're doing all directions, the above can be refactored:
-    diff = rp.T[np.newaxis, :, :] - rr[:, :, np.newaxis]
+# def _bem_inf_field(rd, Q, rp, d):
+# """Infinite-medium magnetic field. See (7) in Mosher, 1999"""
+#     # Get vector from source to sensor integration point
+#     diff = rp - rd
+#     diff2 = np.sum(diff * diff, axis=1)  # Get magnitude of diff
+#
+#     # Compute cross product between diff and dipole to get magnetic field at
+#     # integration point
+#     x = fast_cross_3d(Q[np.newaxis, :], diff)
+#
+#     # Take magnetic field dotted by integration point normal to get magnetic
+#     # field threading the current loop. Divide by R^3 (equivalently, R^2 * R)
+#     return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2))
+
+
+def _bem_inf_fields(rr, rmag, cosmag):
+    """Compute infinite-medium magnetic field at one MEG sensor from all
+    dipoles in all 3 basis directions.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_source_points, 3)
+        3D dipole source positions
+    rmag : ndarray, shape (n_sensor_points, 3)
+        3D positions of 1 MEG coil's integration points (from coil['rmag'])
+    cosmag : ndarray, shape (n_sensor_points, 3)
+        Direction of 1 MEG coil's integration points (from coil['cosmag'])
+
+    Returns
+    -------
+    ndarray, shape (n_dipoles, 3, n_integration_pts)
+        Magnetic field from all dipoles at each MEG sensor integration point
+    """
+    # rr, rmag refactored according to Equation (19) in Mosher, 1999
+    # Knowing that we're doing all directions, refactor above function:
+
+    diff = rmag.T[np.newaxis, :, :] - rr[:, :, np.newaxis]
     diff_norm = np.sum(diff * diff, axis=1)
-    diff_norm *= np.sqrt(diff_norm)
+    diff_norm *= np.sqrt(diff_norm)  # Get magnitude of distance cubed
     diff_norm[diff_norm == 0] = 1  # avoid nans
+
     # This is the result of cross-prod calcs with basis vectors,
-    # as if we had taken (Q=np.eye(3)), then multiplied by the cosmags (c)
+    # as if we had taken (Q=np.eye(3)), then multiplied by cosmags
     # factor, and then summed across directions
-    x = np.array([diff[:, 1] * c[:, 2] - diff[:, 2] * c[:, 1],
-                  diff[:, 2] * c[:, 0] - diff[:, 0] * c[:, 2],
-                  diff[:, 0] * c[:, 1] - diff[:, 1] * c[:, 0]])
+    x = np.array([diff[:, 1] * cosmag[:, 2] - diff[:, 2] * cosmag[:, 1],
+                  diff[:, 2] * cosmag[:, 0] - diff[:, 0] * cosmag[:, 2],
+                  diff[:, 0] * cosmag[:, 1] - diff[:, 1] * cosmag[:, 0]])
     return np.rollaxis(x / diff_norm, 1)
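+
+# Identity sketch used above: for a basis moment Q = e_i, the triple product
+# (Q x diff) . cosmag equals the i-th component of (diff x cosmag), which is
+# why the unrolled cross products above need no explicit Q:
+#
+#     >>> d = np.array([1., 2., 3.]); c = np.array([0.5, -1., 2.])
+#     >>> ex = np.array([1., 0., 0.])
+#     >>> np.isclose(np.cross(d, c)[0], np.dot(np.cross(ex, d), c))
+#     True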
 
 
-def _bem_pot_or_field(rr, mri_rr, mri_Q, mults, coils, solution, srr,
-                      n_jobs, coil_type):
-    """Calculate the magnetic field or electric potential
-
-    The code is very similar between EEG and MEG potentials, so we'll
-    combine them.
+def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
+                      coil_type):
+    """Calculate the magnetic field or electric potential forward solution.
+
+    The code is very similar between EEG and MEG potentials, so combine them.
+    This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field")
+    and "fwd_bem_pot_els" in MNE-C.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions
+    mri_rr : ndarray, shape (n_dipoles, 3)
+        3D source positions in MRI coordinates
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+    coils : list of dict, len(sensors)
+        List of sensors where each element contains sensor specific information
+    solution : ndarray, shape (n_sensors, n_BEM_vertices)
+        Comes from _bem_specify_coils (MEG) or _bem_specify_els (EEG)
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for all surfaces in the BEM
+    n_jobs : int
+        Number of jobs to run in parallel
+    coil_type : str
+        'meg' or 'eeg'
+
+    Returns
+    -------
+    B : ndarray, shape (n_dipoles * 3, n_sensors)
+        Forward solution for a set of sensors
     """
-    # multiply solution by "mults" here for simplicity
-    # we can do this one in-place because it's not used elsewhere
-    solution *= mults
-
     # Both MEG and EEG have the infinite-medium potentials
     # This could be just vectorized, but eats too much memory, so instead we
     # reduce memory by chunking within _do_inf_pots and parallelize, too:
     parallel, p_fun, _ = parallel_func(_do_inf_pots, n_jobs)
     nas = np.array_split
     B = np.sum(parallel(p_fun(mri_rr, sr.copy(), mri_Q, sol.copy())
-                        for sr, sol in zip(nas(srr, n_jobs),
+                        for sr, sol in zip(nas(bem_rr, n_jobs),
                                            nas(solution.T, n_jobs))), axis=0)
     # The copy()s above should make it so the whole objects don't need to be
     # pickled...
 
-    # Only MEG gets the primary current distribution
+    # Only MEG coils are sensitive to the primary current distribution.
     if coil_type == 'meg':
         # Primary current contribution (can be calc. in coil/dipole coords)
         parallel, p_fun, _ = parallel_func(_do_prim_curr, n_jobs)
         pcc = np.concatenate(parallel(p_fun(rr, c)
                                       for c in nas(coils, n_jobs)), axis=1)
         B += pcc
-        B *= 1e-7  # MAG_FACTOR from C code
+        B *= _MAG_FACTOR
     return B
 
 
 def _do_prim_curr(rr, coils):
-    """Calculate primary currents in a set of coils"""
-    out = np.empty((len(rr) * 3, len(coils)))
+    """Calculate primary currents in a set of MEG coils.
+
+    See Mosher et al., 1999 Section II for discussion of primary vs. volume
+    currents.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in head coordinates
+    coils : list of dict
+        List of MEG coils where each element contains coil specific information
+
+    Returns
+    -------
+    pc : ndarray, shape (n_sources, n_MEG_sensors)
+        Primary current for set of MEG coils due to all sources
+    """
+    pc = np.empty((len(rr) * 3, len(coils)))
     for ci, c in enumerate(coils):
-        out[:, ci] = np.sum(c['w'] * _bem_inf_fields(rr, c['rmag'],
-                                                     c['cosmag']), 2).ravel()
-    return out
-
+        # For all integration points, multiply by weights, sum across pts
+        # and then flatten
+        pc[:, ci] = np.sum(c['w'] * _bem_inf_fields(rr, c['rmag'],
+                                                    c['cosmag']), 2).ravel()
+    return pc
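+
+# Layout sketch: rows of ``pc`` (and of every forward block in this module)
+# are ordered (dipole, orientation): row 3 * i + j holds dipole i with unit
+# moment along axis j (x, y, z). A per-dipole view is just a reshape:
+#
+#     >>> pc3 = pc.reshape(len(rr), 3, -1)  # doctest: +SKIP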
+
+
+def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol):
+    """Calculate infinite potentials for MEG or EEG sensors using chunks.
+
+    Parameters
+    ----------
+    mri_rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in MRI coordinates
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for all surfaces in the BEM
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+    sol : ndarray, shape (n_sensors_subset, n_BEM_vertices_subset)
+        Comes from _bem_specify_coils (MEG) or _bem_specify_els (EEG)
+
+    Returns
+    -------
+    B : ndarray, shape (n_dipoles * 3, n_sensors)
+        Forward solution for sensors due to volume currents
+    """
 
-def _do_inf_pots(rr, srr, mri_Q, sol):
-    """Calculate infinite potentials using chunks"""
+    # Doing work of 'fwd_bem_pot_calc' in MNE-C
     # The following code is equivalent to this, but saves memory
-    #v0s = _bem_inf_pots(rr, srr, mri_Q)  # n_rr x 3 x n_surf_rr
-    #v0s.shape = (len(rr) * 3, v0s.shape[2])
-    #B = np.dot(v0s, sol)
+    # v0s = _bem_inf_pots(mri_rr, bem_rr, mri_Q)  # n_rr x 3 x n_bem_rr
+    # v0s.shape = (len(mri_rr) * 3, v0s.shape[2])
+    # B = np.dot(v0s, sol)
 
-    # We chunk the source rr's in order to save memory
-    bounds = np.r_[np.arange(0, len(rr), 1000), len(rr)]
-    B = np.empty((len(rr) * 3, sol.shape[1]))
+    # We chunk the source mri_rr's in order to save memory
+    bounds = np.r_[np.arange(0, len(mri_rr), 1000), len(mri_rr)]
+    B = np.empty((len(mri_rr) * 3, sol.shape[1]))
     for bi in range(len(bounds) - 1):
-        v0s = _bem_inf_pots(rr[bounds[bi]:bounds[bi + 1]], srr, mri_Q)
+        # v0 in Hamalainen et al., 1989 == v_inf in Mosher, et al., 1999
+        v0s = _bem_inf_pots(mri_rr[bounds[bi]:bounds[bi + 1]], bem_rr, mri_Q)
         v0s.shape = (v0s.shape[0] * 3, v0s.shape[2])
         B[3 * bounds[bi]:3 * bounds[bi + 1]] = np.dot(v0s, sol)
     return B
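+
+# Chunking sketch: ``bounds`` splits the dipole positions into blocks of
+# 1000 plus a final partial block, e.g. for 2500 dipoles:
+#
+#     >>> np.r_[np.arange(0, 2500, 1000), 2500]
+#     array([   0, 1000, 2000, 2500])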
 
 
-def _compute_forwards(src, bem, coils_list, cfs, ccoils_list, ccfs,
-                      infos, coil_types, n_jobs):
-    """Compute the MEG and EEG forward solutions"""
-    if bem['bem_method'] != 'linear collocation':
-        raise RuntimeError('only linear collocation supported')
+# #############################################################################
+# SPHERE COMPUTATION
 
-    # Precompute some things that are used for both MEG and EEG
-    rr = np.concatenate([s['rr'][s['vertno']] for s in src])
-    mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
-                      [len(s['rr']) for s in bem['surfs']])[np.newaxis, :]
-    # The dipole location and orientation must be transformed
-    mri_rr = apply_trans(bem['head_mri_t']['trans'], rr)
-    mri_Q = apply_trans(bem['head_mri_t']['trans'], np.eye(3), False)
-    srr = np.concatenate([s['rr'] for s in bem['surfs']])
+def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, sphere, bem_rr,
+                         n_jobs, coil_type):
+    """Do potential or field for spherical model."""
+    fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field
+    parallel, p_fun, _ = parallel_func(fun, n_jobs)
+    B = np.concatenate(parallel(p_fun(r, coils, sphere)
+                       for r in np.array_split(rr, n_jobs)))
+    return B
 
-    # Now, actually compute MEG and EEG solutions
-    Bs = list()
-    for coil_type, coils, cf, ccoils, ccf, info in zip(coil_types, coils_list,
-                                                       cfs, ccoils_list, ccfs,
-                                                       infos):
-        if coils is None:  # nothing to do
-            Bs.append(None)
-        else:
+
+def _sphere_field(rrs, coils, sphere):
+    """Compute field for spherical model using Jukka Sarvas' field computation.
+
+    Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the
+    biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22.
+
+    The formulas have been manipulated for efficient computation
+    by Matti Hamalainen, February 1990
+    """
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+
+    # Shift to the sphere model coordinates
+    rrs = rrs - sphere['r0']
+
+    B = np.zeros((3 * len(rrs), len(coils)))
+    for ri, rr in enumerate(rrs):
+        # Check for a dipole at the origin
+        if np.sqrt(np.dot(rr, rr)) <= 1e-10:
+            continue
+        this_poss = rmags - sphere['r0']
+
+        # Vector from dipole to the field point
+        a_vec = this_poss - rr
+        a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
+        r = np.sqrt(np.sum(this_poss * this_poss, axis=1))
+        rr0 = np.sum(this_poss * rr, axis=1)
+        ar = (r * r) - rr0
+        ar0 = ar / a
+        F = a * (r * a + ar)
+        gr = (a * a) / r + ar0 + 2.0 * (a + r)
+        g0 = a + 2 * r + ar0
+        # Compute the dot products needed
+        re = np.sum(this_poss * cosmags, axis=1)
+        r0e = np.sum(rr * cosmags, axis=1)
+        g = (g0 * r0e - gr * re) / (F * F)
+        # All three conditions must hold for a valid point (cf. MNE-C);
+        # & (not |) so that degenerate geometries are actually masked out
+        good = (a > 0) & (r > 0) & ((a * r) + 1 > 1e-5)
+        v1 = fast_cross_3d(rr[np.newaxis, :], cosmags)
+        v2 = fast_cross_3d(rr[np.newaxis, :], this_poss)
+        xx = ((good * ws)[:, np.newaxis] *
+              (v1 / F[:, np.newaxis] + v2 * g[:, np.newaxis]))
+        zz = np.array([np.bincount(bins, weights=x,
+                                   minlength=len(n_int)) for x in xx.T])
+        B[3 * ri:3 * ri + 3, :] = zz
+    B *= _MAG_FACTOR
+    return B
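+
+# Mapping sketch between the loop above and the Sarvas (1987) formula
+#
+#     B(r) = (μ_0 / (4π F^2)) (F (Q x r0) - ((Q x r0) . r) grad F)
+#
+# with a = r - r0, F = a (r a + r^2 - r0 . r), and
+# grad F = (a^2/r + (a.r)/a + 2a + 2r) r - (a + 2r + (a.r)/a) r0. In the
+# loop, ``gr`` and ``g0`` are the two scalar factors of grad F, and
+# ``v1`` = r0 x n, ``v2`` = r0 x r carry the cross products for all three
+# basis moments Q at once (using (Q x r0) . n = Q . (r0 x n)).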
+
+
+def _eeg_spherepot_coil(rrs, coils, sphere):
+    """Calculate the EEG in the sphere model."""
+    rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+
+    # Shift to the sphere model coordinates
+    rrs = rrs - sphere['r0']
+
+    B = np.zeros((3 * len(rrs), len(coils)))
+    for ri, rr in enumerate(rrs):
+        # Only process dipoles inside the innermost sphere
+        if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']:
+            continue
+        # fwd_eeg_spherepot_vec
+        vval_one = np.zeros((len(rmags), 3))
+
+        # Make a weighted sum over the equivalence parameters
+        for eq in range(sphere['nfit']):
+            # Scale the dipole position
+            rd = sphere['mu'][eq] * rr
+            rd2 = np.sum(rd * rd)
+            rd2_inv = 1.0 / rd2
+            # Go over all electrodes
+            this_pos = rmags - sphere['r0']
+
+            # Scale location onto the surface of the sphere (not used)
+            # if sphere['scale_pos']:
+            #     pos_len = (sphere['layers'][-1]['rad'] /
+            #                np.sqrt(np.sum(this_pos * this_pos, axis=1)))
+            #     this_pos *= pos_len
+
+            # Vector from dipole to the field point
+            a_vec = this_pos - rd
+
+            # Compute the dot products needed
+            a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
+            a3 = 2.0 / (a * a * a)
+            r2 = np.sum(this_pos * this_pos, axis=1)
+            r = np.sqrt(r2)
+            rrd = np.sum(this_pos * rd, axis=1)
+            ra = r2 - rrd
+            rda = rrd - rd2
+
+            # The main ingredients
+            F = a * (r * a + ra)
+            c1 = a3 * rda + 1.0 / a - 1.0 / r
+            c2 = a3 + (a + r) / (r * F)
+
+            # Mix them together and scale by lambda/(rd*rd)
+            m1 = (c1 - c2 * rrd)
+            m2 = c2 * rd2
+
+            vval_one += (sphere['lambda'][eq] * rd2_inv *
+                         (m1[:, np.newaxis] * rd +
+                          m2[:, np.newaxis] * this_pos))
+
+        # Compute the total result once, after the equivalence-source sum
+        xx = vval_one * ws[:, np.newaxis]
+        zz = np.array([np.bincount(bins, weights=x,
+                                   minlength=len(n_int)) for x in xx.T])
+        B[3 * ri:3 * ri + 3, :] = zz
+    # Finish by scaling by 1 / (4π)
+    B *= 0.25 / np.pi
+    return B
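+
+# Background sketch: the multilayer-sphere EEG potential is approximated by
+# an equivalent-source expansion (cf. Berg and Scherg, 1994): each dipole at
+# r_d is replaced by ``nfit`` dipoles at mu_k * r_d with weights lambda_k in
+# a single homogeneous sphere,
+#
+#     V(r) ~= sum_k lambda_k * V_homog(mu_k * r_d, r)
+#
+# which is why the loop above scales ``rd = sphere['mu'][eq] * rr`` and
+# weights the result by ``sphere['lambda'][eq]``.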
+
+
+# #############################################################################
+# MAGNETIC DIPOLE (e.g. CHPI)
+
+def _magnetic_dipole_field_vec(rrs, coils):
+    """Compute an MEG forward solution for a set of magnetic dipoles."""
+    fwd = np.empty((3 * len(rrs), len(coils)))
+    # The code below is a more efficient version (~30x) of this:
+    # for ri, rr in enumerate(rrs):
+    #     for k in range(len(coils)):
+    #         this_coil = coils[k]
+    #         # Go through all points
+    #         diff = this_coil['rmag'] - rr
+    #         dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
+    #         dist = np.sqrt(dist2)
+    #         if (dist < 1e-5).any():
+    #             raise RuntimeError('Coil too close')
+    #         dist5 = dist2 * dist2 * dist
+    #         sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'],
+    #                                   axis=1)[:, np.newaxis] -
+    #                 dist2 * this_coil['cosmag']) / dist5
+    #         fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_)
+    if isinstance(coils, tuple):
+        rmags, cosmags, ws, n_int = coils
+    else:
+        rmags, cosmags, ws, n_int = _concatenate_coils(coils)
+    del coils
+
+    fwd = np.empty((3 * len(rrs), len(n_int)))
+    bins = np.repeat(np.arange(len(n_int)), n_int)
+    for ri, rr in enumerate(rrs):
+        diff = rmags - rr
+        dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
+        dist = np.sqrt(dist2)
+        if (dist < 1e-5).any():
+            raise RuntimeError('Coil too close (dist = %g m)' % dist.min())
+        sum_ = ws[:, np.newaxis] * (3 * diff * np.sum(diff * cosmags,
+                                                      axis=1)[:, np.newaxis] -
+                                    dist2 * cosmags) / (dist2 * dist2 * dist)
+        for ii in range(3):
+            fwd[3 * ri + ii] = np.bincount(bins, weights=sum_[:, ii],
+                                           minlength=len(n_int))
+    fwd *= 1e-7
+    return fwd
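+
+# Formula sketch: each row triplet above applies the point magnetic dipole
+# field B(r) = (μ_0 / (4π)) (3 (m . u) u - m) / d^3, with u = (r - r_d) / d
+# and d = |r - r_d|, for unit moments m = e_x, e_y, e_z, projected onto the
+# coil normals and summed over integration points. Toy check (one on-axis
+# point magnetometer 0.1 m above a dipole at the origin):
+#
+#     >>> coil = dict(rmag=np.array([[0., 0., 0.1]]),
+#     ...             cosmag=np.array([[0., 0., 1.]]), w=np.array([1.]))
+#     >>> B = _magnetic_dipole_field_vec(np.zeros((1, 3)), [coil])
+#     >>> bool(np.isclose(B[2, 0], 2e-4))  # μ_0/(4π) * 2 m / d^3
+#     True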
+
+
+# #############################################################################
+# MAIN TRIAGING FUNCTION
+
+@verbose
+def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
+    """Precompute and store some things that are used for both MEG and EEG.
+
+    Calculation includes multiplication factors, coordinate transforms,
+    compensations, and forward solutions. All are stored in modified fwd_data.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions in head coordinates
+    bem : dict
+        Boundary Element Model information
+    fwd_data : dict
+        Dict containing sensor information. Gets updated here with BEM and
+        sensor information for later forward calculations
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+    """
+
+    bem_rr = mults = mri_Q = head_mri_t = None
+    if not bem['is_sphere']:
+        if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
+            raise RuntimeError('only linear collocation supported')
+        # Store (and apply soon) μ_0/(4π) factor before source computations
+        mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
+                          [len(s['rr']) for s in bem['surfs']])[np.newaxis, :]
+        # Get positions of BEM points for every surface
+        bem_rr = np.concatenate([s['rr'] for s in bem['surfs']])
+
+        # The dipole location and orientation must be transformed
+        head_mri_t = bem['head_mri_t']
+        mri_Q = apply_trans(bem['head_mri_t']['trans'], np.eye(3), False)
+
+    # Compute solution and compensation for each sensor type ('meg', 'eeg')
+    if len(set(fwd_data['coil_types'])) != len(fwd_data['coil_types']):
+        raise RuntimeError('Non-unique sensor types found')
+    compensators, solutions, csolutions = [], [], []
+    for coil_type, coils, ccoils, info in zip(fwd_data['coil_types'],
+                                              fwd_data['coils_list'],
+                                              fwd_data['ccoils_list'],
+                                              fwd_data['infos']):
+        compensator = solution = csolution = None
+        if len(coils) > 0:  # Only proceed if sensors exist
             if coil_type == 'meg':
                 # Compose a compensation data set if necessary
                 compensator = _make_ctf_comp_coils(info, coils)
 
-                # Field computation matrices...
-                logger.info('')
-                start = 'Composing the field computation matrix'
-                logger.info(start + '...')
-                solution = _bem_specify_coils(bem, coils, cf, n_jobs)
-                if compensator is not None:
-                    logger.info(start + ' (compensation coils)...')
-                    csolution = _bem_specify_coils(bem, ccoils, ccf, n_jobs)
-
-            elif coil_type == 'eeg':
-                solution = _bem_specify_els(bem, coils)
-                compensator = None
-
-            # Do the actual calculation
-            logger.info('Computing %s at %d source locations '
-                        '(free orientations)...'
-                        % (coil_type.upper(), len(rr)))
-            # Note: this function modifies "solution" in-place
-            B = _bem_pot_or_field(rr, mri_rr, mri_Q, mults, coils,
-                                  solution, srr, n_jobs, coil_type)
-
-            # Compensate if needed (only done for MEG systems w/compensation)
-            if compensator is not None:
-                # Compute the field in the compensation coils
-                work = _bem_pot_or_field(rr, mri_rr, mri_Q, mults,
-                                         ccoils, csolution, srr, n_jobs,
-                                         coil_type)
-                # Combine solutions so we can do the compensation
-                both = np.zeros((work.shape[0], B.shape[1] + work.shape[1]))
-                picks = pick_types(info, meg=True, ref_meg=False)
-                both[:, picks] = B
-                picks = pick_types(info, meg=False, ref_meg=True)
-                both[:, picks] = work
-                B = np.dot(both, compensator.T)
-            Bs.append(B)
+            if not bem['is_sphere']:
+                if coil_type == 'meg':
+                    # MEG field computation matrices for BEM
+                    start = 'Composing the field computation matrix'
+                    logger.info('\n' + start + '...')
+                    cf = FIFF.FIFFV_COORD_HEAD
+                    # multiply solution by "mults" here for simplicity
+                    solution = _bem_specify_coils(bem, coils, cf, mults,
+                                                  n_jobs)
+                    if compensator is not None:
+                        logger.info(start + ' (compensation coils)...')
+                        csolution = _bem_specify_coils(bem, ccoils, cf,
+                                                       mults, n_jobs)
+                else:
+                    # Compute solution for EEG sensor
+                    solution = _bem_specify_els(bem, coils, mults)
+            else:
+                solution = bem
+                if coil_type == 'eeg':
+                    logger.info('Using the equivalent source approach in the '
+                                'homogeneous sphere for EEG')
+        compensators.append(compensator)
+        solutions.append(solution)
+        csolutions.append(csolution)
+
+    # Choose the forward physics function for a sphere or BEM model
+    fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field
+
+    # Update fwd_data with
+    #    bem_rr (3D BEM vertex positions)
+    #    mri_Q (3x3 Head->MRI coord transformation applied to identity matrix)
+    #    head_mri_t (head->MRI coord transform dict)
+    #    fun (_bem_pot_or_field for BEM; _sphere_pot_or_field for sphere)
+    #    solutions (len-2 list: [ndarray, shape (n_MEG_sens, n_BEM_vertices),
+    #                            ndarray, shape (n_EEG_sens, n_BEM_vertices)])
+    #    csolutions (compensation solutions for the compensation coils)
+    fwd_data.update(dict(bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t,
+                         compensators=compensators, solutions=solutions,
+                         csolutions=csolutions, fun=fun))
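+
+# Reuse sketch (the point of preparing fwd_data separately): prepare once,
+# then evaluate repeatedly at new candidate positions, as in dipole fitting
+# (``rr_guess1`` / ``rr_guess2`` are hypothetical arrays):
+#
+#     >>> _prep_field_computation(rr, bem, fwd_data, n_jobs)  # doctest: +SKIP
+#     >>> B1 = _compute_forwards_meeg(rr_guess1, fwd_data, n_jobs)  # doctest: +SKIP
+#     >>> B2 = _compute_forwards_meeg(rr_guess2, fwd_data, n_jobs)  # doctest: +SKIP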
+
+
+@verbose
+def _compute_forwards_meeg(rr, fd, n_jobs, verbose=None):
+    """Compute MEG and EEG forward solutions for all sensor types.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole positions in head coordinates
+    fd : dict
+        Dict containing forward data after update in _prep_field_computation
+    n_jobs : int
+        Number of jobs to run in parallel
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+
+    Returns
+    -------
+    Bs : list
+        Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
+        n_sensors depends on which channel types are requested (MEG and/or EEG)
+    """
+
+    n_jobs = max(min(n_jobs, len(rr)), 1)
+    Bs = list()
+    # The dipole location and orientation must be transformed to mri coords
+    mri_rr = None
+    if fd['head_mri_t'] is not None:
+        mri_rr = apply_trans(fd['head_mri_t']['trans'], rr)
+    mri_Q, bem_rr, fun = fd['mri_Q'], fd['bem_rr'], fd['fun']
+    for ci in range(len(fd['coils_list'])):
+        coils, ccoils = fd['coils_list'][ci], fd['ccoils_list'][ci]
+        if len(coils) == 0:  # nothing to do
+            Bs.append(np.zeros((3 * len(rr), 0)))
+            continue
+
+        coil_type, compensator = fd['coil_types'][ci], fd['compensators'][ci]
+        solution, csolution = fd['solutions'][ci], fd['csolutions'][ci]
+        info = fd['infos'][ci]
+
+        # Do the actual forward calculation for a list of MEG/EEG sensors
+        logger.info('Computing %s at %d source location%s '
+                    '(free orientations)...'
+                    % (coil_type.upper(), len(rr),
+                       '' if len(rr) == 1 else 's'))
+        # Calculate forward solution using a sphere or BEM model
+        B = fun(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
+                coil_type)
+
+        # Compensate if needed (only done for MEG systems w/compensation)
+        if compensator is not None:
+            # Compute the field in the compensation sensors
+            work = fun(rr, mri_rr, mri_Q, ccoils, csolution, bem_rr,
+                       n_jobs, coil_type)
+            # Combine solutions so we can do the compensation
+            both = np.zeros((work.shape[0], B.shape[1] + work.shape[1]))
+            picks = pick_types(info, meg=True, ref_meg=False)
+            both[:, picks] = B
+            picks = pick_types(info, meg=False, ref_meg=True)
+            both[:, picks] = work
+            B = np.dot(both, compensator.T)
+        Bs.append(B)
+    return Bs
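+
+# Compensation sketch: the ordinary-channel and reference-channel fields are
+# stacked column-wise in ``both`` (ordered by the channel picks) and mapped
+# through the compensator in a single matrix product,
+#
+#     B_comp = [B | work] . compensator.T
+#
+# mirroring how compensated CTF data are formed from raw plus reference
+# signals.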
+
+
+@verbose
+def _compute_forwards(rr, bem, coils_list, ccoils_list, infos, coil_types,
+                      n_jobs, verbose=None):
+    """Compute the MEG and EEG forward solutions.
+
+    This effectively combines compute_forward_meg and compute_forward_eeg
+    from MNE-C.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_sources, 3)
+        3D dipole positions in head coordinates
+    bem : dict
+        Boundary Element Model information for all surfaces
+    coils_list : list
+        List of MEG and/or EEG sensor information dicts
+    ccoils_list : list
+        Optional list of MEG compensation information
+    infos : list, len(2)
+        infos[0] is MEG info, infos[1] is EEG info
+    coil_types : list of str
+        Sensor types. May contain 'meg' and/or 'eeg'
+    n_jobs : int
+        Number of jobs to run in parallel
+
+    Returns
+    -------
+    Bs : list of ndarray
+        Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
+        n_sensors depends on which channel types are requested (MEG and/or EEG)
+    """
 
+    # Split calculation into two steps to save (potentially) a lot of time
+    # when e.g. dipole fitting
+    fwd_data = dict(coils_list=coils_list, ccoils_list=ccoils_list,
+                    infos=infos, coil_types=coil_types)
+    _prep_field_computation(rr, bem, fwd_data, n_jobs)
+    Bs = _compute_forwards_meeg(rr, fwd_data, n_jobs)
     return Bs
diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py
index 47ef1dd..88d3802 100644
--- a/mne/forward/_field_interpolation.py
+++ b/mne/forward/_field_interpolation.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
 import numpy as np
 from scipy import linalg
 from copy import deepcopy
@@ -8,9 +10,10 @@ from ..surface import get_head_surf, get_meg_helmet_surf
 
 from ..io.proj import _has_eeg_average_ref_proj, make_projector
 from ..transforms import transform_surface_to, read_trans, _find_trans
-from ._make_forward import _create_coils
+from ._make_forward import _create_meg_coils, _create_eeg_els, _read_coil_defs
 from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
-                         _get_legen_lut_fast, _get_legen_lut_accurate)
+                         _get_legen_lut_fast, _get_legen_lut_accurate,
+                         _do_cross_dots)
 from ..parallel import check_n_jobs
 from ..utils import logger, verbose
 from ..fixes import partial
@@ -35,6 +38,23 @@ def _ad_hoc_noise(coils, ch_type='meg'):
     return cov
 
 
+def _setup_dots(mode, coils, ch_type):
+    """Setup dot products"""
+    my_origin = np.array([0.0, 0.0, 0.04])
+    int_rad = 0.06
+    noise = _ad_hoc_noise(coils, ch_type)
+    if mode == 'fast':
+        # Use 50 coefficients with nearest-neighbor interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 50)
+        lut_fun = partial(_get_legen_lut_fast, lut=lut)
+    else:  # 'accurate'
+        # Use 100 coefficients with linear interpolation
+        lut, n_fact = _get_legen_table(ch_type, False, 100)
+        lut_fun = partial(_get_legen_lut_accurate, lut=lut)
+
+    return my_origin, int_rad, noise, lut_fun, n_fact
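+
+# Usage sketch (``coils`` assumed to come from _create_meg_coils): ``mode``
+# only selects the Legendre table size and its interpolation scheme, shared
+# by the mapping routines below:
+#
+#     >>> origin, int_rad, noise, lut_fun, n_fact = _setup_dots(
+#     ...     'fast', coils, 'meg')  # doctest: +SKIP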
+
+
 def _compute_mapping_matrix(fmd, info):
     """Do the hairy computations"""
     logger.info('preparing the mapping matrix...')
@@ -86,6 +106,121 @@ def _compute_mapping_matrix(fmd, info):
     return mapping_mat
 
 
+def _map_meg_channels(inst, pick_from, pick_to, mode='fast'):
+    """Find mapping from one set of channels to another.
+
+    Parameters
+    ----------
+    inst : mne.io.Raw, mne.Epochs or mne.Evoked
+        The data to interpolate. Must be preloaded.
+    pick_from : array-like of int
+        The channels from which to interpolate.
+    pick_to : array-like of int
+        The channels to which to interpolate.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+
+    Returns
+    -------
+    mapping : array
+        A mapping matrix of shape len(pick_to) x len(pick_from).
+    """
+    info_from = pick_info(inst.info, pick_from, copy=True)
+    info_to = pick_info(inst.info, pick_to, copy=True)
+
+    # no need to apply trans because both from and to coils are in device
+    # coordinates
+    templates = _read_coil_defs()
+    coils_from = _create_meg_coils(info_from['chs'], 'normal',
+                                   info_from['dev_head_t'], templates)
+    coils_to = _create_meg_coils(info_to['chs'], 'normal',
+                                 info_to['dev_head_t'], templates)
+    miss = 1e-4  # Smoothing criterion for MEG
+
+    #
+    # Step 2. Calculate the dot products
+    #
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from,
+                                                             'meg')
+    logger.info('Computing dot products for %i coils...' % (len(coils_from)))
+    self_dots = _do_self_dots(int_rad, False, coils_from, my_origin, 'meg',
+                              lut_fun, n_fact, n_jobs=1)
+    logger.info('Computing cross products for %i x %i coils...'
+                % (len(coils_from), len(coils_to)))
+    cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to,
+                                my_origin, 'meg', lut_fun, n_fact).T
+
+    ch_names = [c['ch_name'] for c in info_from['chs']]
+    fmd = dict(kind='meg', ch_names=ch_names,
+               origin=my_origin, noise=noise, self_dots=self_dots,
+               surface_dots=cross_dots, int_rad=int_rad, miss=miss)
+    logger.info('Field mapping data ready')
+
+    #
+    # Step 3. Compute the mapping matrix
+    #
+    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
+
+    return fmd['data']
+
+
+def _as_meg_type_evoked(evoked, ch_type='grad', mode='fast'):
+    """Compute virtual evoked using interpolated fields in mag/grad channels.
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked object.
+    ch_type : str
+        The destination channel type. It can be 'mag' or 'grad'.
+    mode : str
+        Either `'accurate'` or `'fast'`, determines the quality of the
+        Legendre polynomial expansion used. `'fast'` should be sufficient
+        for most applications.
+
+    Returns
+    -------
+    evoked : instance of mne.Evoked
+        The transformed evoked object containing only virtual channels.
+    """
+    evoked = evoked.copy()
+
+    if ch_type not in ['mag', 'grad']:
+        raise ValueError('to_type must be "mag" or "grad", not "%s"'
+                         % ch_type)
+    # pick the original and destination channels
+    pick_from = pick_types(evoked.info, meg=True, eeg=False,
+                           ref_meg=False)
+    pick_to = pick_types(evoked.info, meg=ch_type, eeg=False,
+                         ref_meg=False)
+
+    if len(pick_to) == 0:
+        raise ValueError('No channels matching the destination channel type'
+                         ' found in info. Please pass an evoked containing'
+                         ' both the original and destination channels. Only'
+                         ' the locations of the destination channels will be'
+                         ' used for interpolation.')
+
+    mapping = _map_meg_channels(evoked, pick_from, pick_to, mode=mode)
+
+    # compute evoked data by multiplying by the 'gain matrix' from
+    # original sensors to virtual sensors
+    data = np.dot(mapping, evoked.data[pick_from])
+
+    # keep only the destination channel types
+    evoked.pick_types(meg=ch_type, eeg=False, ref_meg=False)
+    evoked.data = data
+
+    # change channel names to emphasize they contain interpolated data
+    for ch in evoked.info['chs']:
+        ch['ch_name'] += '_virtual'
+    evoked.info['ch_names'] = [ch['ch_name'] for ch in evoked.info['chs']]
+
+    return evoked
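+
+# Usage sketch ('sample-ave.fif' is a hypothetical file name):
+#
+#     >>> evoked = mne.read_evokeds('sample-ave.fif', condition=0)  # doctest: +SKIP
+#     >>> grads = _as_meg_type_evoked(evoked, ch_type='grad')  # doctest: +SKIP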
+
+
 @verbose
 def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
                           n_jobs=1, verbose=None):
@@ -118,7 +253,7 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
         A n_vertices x n_sensors array that remaps the MEG or EEG data,
         as `new_data = np.dot(mapping, data)`.
     """
-    if not all([key in surf for key in ['rr', 'nn']]):
+    if not all(key in surf for key in ['rr', 'nn']):
         raise KeyError('surf must have both "rr" and "nn"')
     if 'coord_frame' not in surf:
         raise KeyError('The surface coordinate frame must be specified '
@@ -127,12 +262,7 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
         raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)
 
     # deal with coordinate frames here -- always go to "head" (easiest)
-    if surf['coord_frame'] == FIFF.FIFFV_COORD_MRI:
-        if trans is None or FIFF.FIFFV_COORD_MRI not in [trans['to'],
-                                                         trans['from']]:
-            raise ValueError('trans must be a Head<->MRI transform if the '
-                             'surface is not in head coordinates.')
-        surf = transform_surface_to(deepcopy(surf), 'head', trans)
+    surf = transform_surface_to(deepcopy(surf), 'head', trans)
 
     n_jobs = check_n_jobs(n_jobs)
 
@@ -150,34 +280,24 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
         logger.info('Prepare EEG mapping...')
     if len(picks) == 0:
         raise RuntimeError('cannot map, no channels found')
-    chs = pick_info(info, picks)['chs']
+    chs = pick_info(info, picks, copy=True)['chs']
 
     # create coil defs in head coordinates
     if ch_type == 'meg':
         # Put them in head coordinates
-        coils = _create_coils(chs, FIFF.FWD_COIL_ACCURACY_NORMAL,
-                              info['dev_head_t'], coil_type='meg')[0]
+        coils = _create_meg_coils(chs, 'normal', info['dev_head_t'])
         type_str = 'coils'
         miss = 1e-4  # Smoothing criterion for MEG
     else:  # EEG
-        coils = _create_coils(chs, coil_type='eeg')[0]
+        coils = _create_eeg_els(chs)
         type_str = 'electrodes'
         miss = 1e-3  # Smoothing criterion for EEG
 
     #
     # Step 2. Calculate the dot products
     #
-    my_origin = np.array([0.0, 0.0, 0.04])
-    int_rad = 0.06
-    noise = _ad_hoc_noise(coils, ch_type)
-    if mode == 'fast':
-        # Use 50 coefficients with nearest-neighbor interpolation
-        lut, n_fact = _get_legen_table(ch_type, False, 50)
-        lut_fun = partial(_get_legen_lut_fast, lut=lut)
-    else:  # 'accurate'
-        # Use 100 coefficients with linear interpolation
-        lut, n_fact = _get_legen_table(ch_type, False, 100)
-        lut_fun = partial(_get_legen_lut_accurate, lut=lut)
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils,
+                                                             ch_type)
     logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
     self_dots = _do_self_dots(int_rad, False, coils, my_origin, ch_type,
                               lut_fun, n_fact, n_jobs)
@@ -207,15 +327,16 @@ def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
     return fmd
 
 
-def make_field_map(evoked, trans_fname='auto', subject=None, subjects_dir=None,
-                   ch_type=None, mode='fast', n_jobs=1):
+def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None,
+                   ch_type=None, mode='fast', meg_surf='helmet',
+                   n_jobs=1):
     """Compute surface maps used for field display in 3D
 
     Parameters
     ----------
     evoked : Evoked | Epochs | Raw
         The measurement file. Need to have info attribute.
-    trans_fname : str | 'auto' | None
+    trans : str | 'auto' | None
         The full path to the `*-trans.fif` file produced during
         coregistration. If present or found using 'auto'
         the maps will be in MRI coordinates.
@@ -233,6 +354,9 @@ def make_field_map(evoked, trans_fname='auto', subject=None, subjects_dir=None,
         Either `'accurate'` or `'fast'`, determines the quality of the
         Legendre polynomial expansion used. `'fast'` should be sufficient
         for most applications.
+    meg_surf : str
+        Should be ``'helmet'`` or ``'head'`` to specify in which surface
+        to compute the MEG field map. The default value is ``'helmet'``.
     n_jobs : int
         The number of jobs to run in parallel.
 
@@ -252,24 +376,27 @@ def make_field_map(evoked, trans_fname='auto', subject=None, subjects_dir=None,
                              % ch_type)
         types = [ch_type]
 
-    if trans_fname == 'auto':
+    if trans == 'auto':
         # let's try to do this in MRI coordinates so they're easy to plot
-        trans_fname = _find_trans(subject, subjects_dir)
+        trans = _find_trans(subject, subjects_dir)
 
-    if 'eeg' in types and trans_fname is None:
+    if 'eeg' in types and trans is None:
         logger.info('No trans file available. EEG data ignored.')
         types.remove('eeg')
 
     if len(types) == 0:
         raise RuntimeError('No data available for mapping.')
 
-    trans = None
-    if trans_fname is not None:
-        trans = read_trans(trans_fname)
+    if trans is not None:
+        trans = read_trans(trans)
+
+    if meg_surf not in ['helmet', 'head']:
+        raise ValueError('Surface to plot MEG fields must be '
+                         '"helmet" or "head"')
 
     surfs = []
     for this_type in types:
-        if this_type == 'meg':
+        if this_type == 'meg' and meg_surf == 'helmet':
             surf = get_meg_helmet_surf(info, trans)
         else:
             surf = get_head_surf(subject, subjects_dir=subjects_dir)
diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py
index ecc1153..f0f4d15 100644
--- a/mne/forward/_lead_dots.py
+++ b/mne/forward/_lead_dots.py
@@ -1,3 +1,9 @@
+# Authors: Eric Larson <larsoner@uw.edu>
+#          Mainak Jas <mainak.jas@telecom-paristech.fr>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
 import os
 from os import path as op
 
@@ -37,9 +43,8 @@ def _get_legen_der(xx, n_coeff=100):
         p0ds[:2] = [0.0, 1.0]
         p0dds[:2] = [0.0, 0.0]
         for n in range(2, n_coeff):
-            p0s[n], p0ds[n], p0dds[n] = _next_legen_der(n, x, p0s[n - 1],
-                                            p0s[n - 2], p0ds[n - 1],
-                                            p0dds[n - 1])
+            p0s[n], p0ds[n], p0dds[n] = _next_legen_der(
+                n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1])
     return coeffs
 
 
@@ -81,8 +86,8 @@ def _get_legen_table(ch_type, volume_integral=False, n_coeff=100,
     if ch_type == 'meg':
         n_facts = list()  # multn, then mult, then multn * (n + 1)
         if volume_integral:
-            n_facts.append(n_fact / ((2.0 * n_fact + 1.0)
-                                     * (2.0 * n_fact + 3.0)))
+            n_facts.append(n_fact / ((2.0 * n_fact + 1.0) *
+                                     (2.0 * n_fact + 3.0)))
         else:
             n_facts.append(n_fact / (2.0 * n_fact + 1.0))
         n_facts.append(n_facts[0] / (n_fact + 1.0))
@@ -140,7 +145,26 @@ def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
 
 
 def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
-    """Lead field dot products using Legendre polynomial (P_n) series"""
+    """Lead field dot products using Legendre polynomial (P_n) series.
+
+    Parameters
+    ----------
+    beta : array, shape (n_points * n_points, 1)
+        Coefficients of the integration.
+    ctheta : array, shape (n_points * n_points, 1)
+        Cosine of the angle between the sensor integration points.
+    lut_fun : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    volume_integral : bool
+        If True, compute volume integral.
+
+    Returns
+    -------
+    sums : array, shape (4, n_points * n_points)
+        The results.
+    """
     # Compute the sums occurring in the evaluation.
     # Two point magnetometers on the xz plane are assumed.
     # The four sums are:
@@ -163,7 +187,43 @@ def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
 
 def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
                         w1, w2, volume_integral, lut, n_fact, ch_type):
-    """Lead field dot product computation for M/EEG in the sphere model"""
+    """Lead field dot product computation for M/EEG in the sphere model.
+
+    Parameters
+    ----------
+    r : float
+        The integration radius. It is used to calculate beta as:
+        beta = (r * r) / (lr1 * lr2).
+    rr1 : array, shape (n_points, 3)
+        Normalized position vectors of integration points in first sensor.
+    rr2 : array, shape (n_points, 3)
+        Normalized position vectors of integration points in second sensor.
+    lr1 : array, shape (n_points,)
+        Magnitudes of position vectors of integration points in first sensor.
+    lr2 : array, shape (n_points,)
+        Magnitudes of position vectors of integration points in second sensor.
+    cosmags1 : array, shape (n_points, 3)
+        Directions of integration points in first sensor.
+    cosmags2 : array, shape (n_points, 3)
+        Directions of integration points in second sensor.
+    w1 : array, shape (n_points,)
+        Weights of integration points in the first sensor.
+    w2 : array, shape (n_points,)
+        Weights of integration points in the second sensor.
+    volume_integral : bool
+        If True, compute volume integral.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+
+    Returns
+    -------
+    result : float
+        The integration sum.
+    """
     ct = np.einsum('ik,jk->ij', rr1, rr2)  # outer product, sum over coords
 
     # expand axes
@@ -178,13 +238,13 @@ def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
         sums.shape = (4,) + beta.shape
 
         # Accumulate the result, a little bit streamlined version
-        #cosmags1 = cosmags1[:, np.newaxis, :]
-        #cosmags2 = cosmags2[np.newaxis, :, :]
-        #n1c1 = np.sum(cosmags1 * rr1, axis=2)
-        #n1c2 = np.sum(cosmags1 * rr2, axis=2)
-        #n2c1 = np.sum(cosmags2 * rr1, axis=2)
-        #n2c2 = np.sum(cosmags2 * rr2, axis=2)
-        #n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
+        # cosmags1 = cosmags1[:, np.newaxis, :]
+        # cosmags2 = cosmags2[np.newaxis, :, :]
+        # n1c1 = np.sum(cosmags1 * rr1, axis=2)
+        # n1c2 = np.sum(cosmags1 * rr2, axis=2)
+        # n2c1 = np.sum(cosmags2 * rr1, axis=2)
+        # n2c2 = np.sum(cosmags2 * rr2, axis=2)
+        # n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
         n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
         n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
         n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
@@ -212,16 +272,42 @@ def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
         result = eeg_const * sums / lr1lr2
     # now we add them all up with weights
     if w1 is None:  # operating on surface, treat independently
-        #result = np.sum(w2[np.newaxis, :] * result, axis=1)
+        # result = np.sum(w2[np.newaxis, :] * result, axis=1)
         result = np.dot(result, w2)
     else:
-        #result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
+        # result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
         result = np.einsum('i,j,ij', w1, w2, result)
     return result
 
 
 def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
-    """Perform the lead field dot product integrations"""
+    """Perform the lead field dot product integrations.
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2).
+    volume : bool
+        If True, perform volume integral.
+    coils : list of dict
+        The coils.
+    r0 : array, shape (3,)
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    products : array, shape (n_coils, n_coils)
+        The integration products.
+    """
     if ch_type == 'eeg':
         intrad *= 0.7
     # convert to normalized distances from expansion center
@@ -241,6 +327,7 @@ def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
 def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
                          n_fact, ch_type, idx):
     """Helper for parallelization"""
+    # all possible combinations of two magnetometers
     products = np.zeros((len(rmags), len(rmags)))
     for ci1 in idx:
         for ci2 in range(0, ci1 + 1):
@@ -254,10 +341,97 @@ def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
     return products
 
 
+def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type,
+                   lut, n_fact):
+    """Compute lead field dot product integrations between two coil sets.
+
+    The code is a direct translation of MNE-C code found in
+    `mne_map_data/lead_dots.c`.
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2).
+    volume : bool
+        If True, compute volume integral.
+    coils1 : list of dict
+        The original coils.
+    coils2 : list of dict
+        The coils to which data is being mapped.
+    r0 : array, shape (3,)
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+
+    Returns
+    -------
+    products : array, shape (n_coils1, n_coils2)
+        The integration products between the two coil sets.
+    """
+    rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1]
+    rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2]
+
+    rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1]
+    rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2]
+
+    rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)]
+    rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)]
+
+    ws1 = [coil['w'] for coil in coils1]
+    ws2 = [coil['w'] for coil in coils2]
+
+    cosmags1 = [coil['cosmag'] for coil in coils1]
+    cosmags2 = [coil['cosmag'] for coil in coils2]
+
+    products = np.zeros((len(rmags1), len(rmags2)))
+    for ci1 in range(len(coils1)):
+        for ci2 in range(len(coils2)):
+            res = _fast_sphere_dot_r0(intrad, rmags1[ci1], rmags2[ci2],
+                                      rlens1[ci1], rlens2[ci2], cosmags1[ci1],
+                                      cosmags2[ci2], ws1[ci1], ws2[ci2],
+                                      volume, lut, n_fact, ch_type)
+            products[ci1, ci2] = res
+    return products
+
+
 def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
                      lut, n_fact, n_jobs):
-    """Compute the map construction products"""
-    virt_ref = False
+    """Compute the map construction products
+
+    Parameters
+    ----------
+    intrad : float
+        The integration radius. It is used to calculate beta as:
+        beta = (intrad * intrad) / (r1 * r2)
+    volume : bool
+        If True, compute a volume integral.
+    coils : list of dict
+        The coils.
+    surf : dict
+        The surface on which the field is interpolated.
+    sel : array
+        Indices of the surface vertices to select.
+    r0 : array, shape (3,)
+        The origin of the sphere.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    lut : callable
+        Look-up table for Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    n_jobs : int
+        Number of jobs to run in parallel.
+
+    Returns
+    -------
+    products : array, shape (n_vertices, n_coils)
+        The integration products.
+    """
     # convert to normalized distances from expansion center
     rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
     rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
@@ -266,18 +440,22 @@ def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
     ws = [coil['w'] for coil in coils]
     rref = None
     refl = None
+    # virt_ref = False
     if ch_type == 'eeg':
         intrad *= 0.7
-        if virt_ref:
-            rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
-            refl = np.sqrt(np.sum(rref * rref, axis=1))
-            rref /= refl[:, np.newaxis]
+        # The virtual ref code is untested and unused, so it is
+        # commented out for now
+        # if virt_ref:
+        #     rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
+        #     refl = np.sqrt(np.sum(rref * rref, axis=1))
+        #     rref /= refl[:, np.newaxis]
 
     rsurf = surf['rr'][sel] - r0[np.newaxis, :]
     lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))
     rsurf /= lsurf[:, np.newaxis]
     this_nn = surf['nn'][sel]
 
+    # compute the surface dot products, parallelizing over subsets of coils
     parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)
     prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
                            this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
@@ -290,7 +468,39 @@ def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
 def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
                             this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
                             idx):
-    """Helper for parallelization"""
+    """Helper for parallelization.
+
+    Parameters
+    ----------
+    refl : array | None
+        If ch_type is 'eeg', the magnitude of position vector of the
+        virtual reference (never used).
+    lsurf : array
+        Magnitude of position vector of the surface points.
+    rlens : list of array, length n_coils
+        Magnitudes of the position vectors of each coil's integration
+        points.
+    this_nn : array, shape (n_vertices, 3)
+        Surface normals.
+    cosmags : list of array
+        Direction of the integration points in the coils.
+    ws : list of array
+        Integration weights of the coils.
+    volume : bool
+        If True, compute volume integral.
+    lut : callable
+        Look-up table for evaluating Legendre polynomials.
+    n_fact : array
+        Coefficients in the integration sum.
+    ch_type : str
+        The channel type. It can be 'meg' or 'eeg'.
+    idx : array
+        Indices of the coils to process in this subset.
+
+    Returns
+    -------
+    products : array, shape (n_vertices, n_coils)
+        The integration products.
+    """
     products = np.zeros((len(rsurf), len(rmags)))
     for ci in idx:
         res = _fast_sphere_dot_r0(intrad, rsurf, rmags[ci],
@@ -299,11 +509,13 @@ def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
                                   None, ws[ci], volume, lut,
                                   n_fact, ch_type)
         if rref is not None:
-            vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
-                                       refl, rlens[ci],
-                                       None, ws[ci], volume,
-                                       lut, n_fact, ch_type)
-            products[:, ci] = res - vres
+            raise NotImplementedError  # this code path is unused and untested
+            # vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
+            #                            refl, rlens[ci],
+            #                            this_nn, cosmags[ci],
+            #                            None, ws[ci], volume, lut,
+            #                            n_fact, ch_type)
+            # products[:, ci] = res - vres
         else:
             products[:, ci] = res
     return products
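For orientation, each parallel job in the loop above receives a subset idx of
coil indices and fills only those columns of the product matrix, so summing
the per-job results merges disjoint fills. A self-contained sketch of that
pattern, with a dummy kernel standing in for _fast_sphere_dot_r0:

    import numpy as np

    def fill_columns(kernel, n_rows, n_cols, idx):
        products = np.zeros((n_rows, n_cols))
        for ci in idx:
            products[:, ci] = kernel(ci)   # this job's columns only
        return products

    kernel = lambda ci: np.full(3, float(ci))
    parts = [fill_columns(kernel, 3, 4, idx) for idx in ([0, 1], [2, 3])]
    total = np.sum(parts, axis=0)          # merged (3, 4) product matrix
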
diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py
index f4bb23d..2d96811 100644
--- a/mne/forward/_make_forward.py
+++ b/mne/forward/_make_forward.py
@@ -5,30 +5,62 @@
 #
 # License: BSD (3-clause)
 
-from ..externals.six import string_types
 import os
 from os import path as op
 import numpy as np
 
 from .. import pick_types, pick_info
 from ..io.pick import _has_kit_refs
-from ..io import read_info
+from ..io import read_info, _loc_to_coil_trans, _loc_to_eeg_loc
+from ..io.meas_info import Info
 from ..io.constants import FIFF
 from .forward import Forward, write_forward_solution, _merge_meg_eeg_fwds
 from ._compute_forward import _compute_forwards
-from ..transforms import (invert_transform, transform_surface_to,
-                          read_trans, _get_mri_head_t_from_trans_file,
-                          apply_trans, _print_coord_trans, _coord_frame_name)
+from ..transforms import (_ensure_trans, transform_surface_to, apply_trans,
+                          _get_mri_head_t, _print_coord_trans,
+                          _coord_frame_name, Transform)
 from ..utils import logger, verbose
-from ..source_space import (read_source_spaces, _filter_source_spaces,
-                            SourceSpaces)
-from ..surface import read_bem_solution, _normalize_vectors
+from ..source_space import _ensure_src, _filter_source_spaces
+from ..surface import _normalize_vectors
+from ..bem import read_bem_solution, _bem_find_surface, ConductorModel
+from ..externals.six import string_types
+
+
+_accuracy_dict = dict(normal=FIFF.FWD_COIL_ACCURACY_NORMAL,
+                      accurate=FIFF.FWD_COIL_ACCURACY_ACCURATE)
+
 
+@verbose
+def _read_coil_defs(fname=None, elekta_defs=False, verbose=None):
+    """Read a coil definition file.
+
+    Parameters
+    ----------
+    fname : str
+        The name of the file from which coil definitions are read.
+    elekta_defs : bool
+        If True, use Elekta's coil definitions for numerical integration
+        (from Abramowitz and Stegun section 25.4.62).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
-def _read_coil_defs(fname=None):
-    """Read a coil definition file"""
+    Returns
+    -------
+    res : list of dict
+        The coil definitions. Each coil is a dictionary with the keys
+        'cosmag' | 'coil_class' | 'coord_frame' | 'rmag' | 'type' |
+        'chname' | 'accuracy', where 'cosmag' holds the directions of the
+        integration points and 'rmag' their position vectors.
+    """
     if fname is None:
-        fname = op.join(op.split(__file__)[0], '..', 'data', 'coil_def.dat')
+        if not elekta_defs:
+            fname = op.join(op.split(__file__)[0], '..', 'data',
+                            'coil_def.dat')
+        else:
+            fname = op.join(op.split(__file__)[0], '..', 'data',
+                            'coil_def_Elekta.dat')
     big_val = 0.5
     with open(fname, 'r') as fid:
         lines = fid.readlines()
@@ -38,7 +70,7 @@ def _read_coil_defs(fname=None):
             line = lines.pop()
             if line[0] != '#':
                 vals = np.fromstring(line, sep=' ')
-                assert len(vals) == 7
+                assert len(vals) in (6, 7)  # newer numpy can truncate the trailing comment
                 start = line.find('"')
                 end = len(line.strip()) - 1
                 assert line.strip()[end] == '"'
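The relaxed assertion accounts for how np.fromstring(line, sep=' ') treats
the trailing quoted description: depending on the numpy version it may or may
not contribute an extra parsed value. A minimal illustration of the parsing
call itself (the record below is hypothetical, not the actual coil_def.dat
layout):

    import numpy as np

    # np.fromstring parses leading numeric fields and stops at text it
    # cannot convert, so a trailing free-text field can change the count
    vals = np.fromstring('3012 2 8 4.6e-3 1.5e-2', sep=' ')
    print(len(vals))  # -> 5
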
@@ -79,95 +111,330 @@ def _read_coil_defs(fname=None):
 def _create_meg_coil(coilset, ch, acc, t):
     """Create a coil definition using templates, transform if necessary"""
     # Also change the coordinate frame if so desired
+    if t is None:
+        t = Transform('meg', 'meg', np.eye(4))  # identity, no change
 
     if ch['kind'] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
         raise RuntimeError('%s is not a MEG channel' % ch['ch_name'])
 
     # Simple linear search from the coil definitions
-    d = None
     for coil in coilset['coils']:
         if coil['coil_type'] == (ch['coil_type'] & 0xFFFF) and \
                 coil['accuracy'] == acc:
-            d = coil
-
-    if d is None:
+            break
+    else:
         raise RuntimeError('Desired coil definition not found '
                            '(type = %d acc = %d)' % (ch['coil_type'], acc))
 
-    # Create the result
-    res = dict(chname=ch['ch_name'], desc=None, coil_class=d['coil_class'],
-               accuracy=d['accuracy'], base=d['base'], size=d['size'],
-               type=ch['coil_type'], w=d['w'])
-
-    if d['desc']:
-        res['desc'] = d['desc']
-
     # Apply a coordinate transformation if so desired
-    coil_trans = ch['coil_trans'].copy()  # make sure we don't botch it
-    if t is not None:
-        coil_trans = np.dot(t['trans'], coil_trans)
-        res['coord_frame'] = t['to']
-    else:
-        res['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+    coil_trans = np.dot(t['trans'], _loc_to_coil_trans(ch['loc']))
 
-    res['rmag'] = apply_trans(coil_trans, d['rmag'])
-    res['cosmag'] = apply_trans(coil_trans, d['cosmag'], False)
+    # Create the result
+    res = dict(chname=ch['ch_name'], coil_class=coil['coil_class'],
+               accuracy=coil['accuracy'], base=coil['base'], size=coil['size'],
+               type=ch['coil_type'], w=coil['w'], desc=coil['desc'],
+               coord_frame=t['to'], rmag=apply_trans(coil_trans, coil['rmag']),
+               cosmag=apply_trans(coil_trans, coil['cosmag'], False))
     res.update(ex=coil_trans[:3, 0], ey=coil_trans[:3, 1],
                ez=coil_trans[:3, 2], r0=coil_trans[:3, 3])
     return res
 
 
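Note the asymmetry above: 'rmag' (positions) is transformed with the full
affine, while 'cosmag' (orientations) passes False to skip the translation.
A hand-rolled sketch of that distinction, not MNE's apply_trans itself:

    import numpy as np

    def affine_apply(trans, pts, move=True):
        out = np.dot(pts, trans[:3, :3].T)       # rotate row vectors
        if move:
            out = out + trans[:3, 3]             # translate positions only
        return out

    trans = np.eye(4)
    trans[:3, 3] = [0., 0., 0.05]                # pure translation
    pts = np.array([[1., 0., 0.]])
    print(affine_apply(trans, pts))              # position moves
    print(affine_apply(trans, pts, move=False))  # direction unchanged
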
-def _create_eeg_el(ch, t):
+def _create_eeg_el(ch, t=None):
     """Create an electrode definition, transform coords if necessary"""
     if ch['kind'] != FIFF.FIFFV_EEG_CH:
         raise RuntimeError('%s is not an EEG channel. Cannot create an '
                            'electrode definition.' % ch['ch_name'])
-    if t is not None and t['from'] != FIFF.FIFFV_COORD_HEAD:
+    if t is None:
+        t = Transform('head', 'head', np.eye(4))  # identity, no change
+    if t.from_str != 'head':
         raise RuntimeError('Inappropriate coordinate transformation')
 
-    r0ex = ch['eeg_loc'][:, :2]
+    r0ex = _loc_to_eeg_loc(ch['loc'])
     if r0ex.shape[1] == 1:  # no reference
         w = np.array([1.])
     else:  # has reference
         w = np.array([1., -1.])
 
     # Optional coordinate transformation
-    r0ex = r0ex.T.copy()
-    if t is not None:
-        r0ex = apply_trans(t['trans'], r0ex)
-        coord_frame = t['to']
-    else:
-        coord_frame = FIFF.FIFFV_COORD_HEAD
+    r0ex = apply_trans(t['trans'], r0ex.T)
 
     # The electrode location
     cosmag = r0ex.copy()
     _normalize_vectors(cosmag)
     res = dict(chname=ch['ch_name'], coil_class=FIFF.FWD_COILC_EEG, w=w,
-               accuracy=FIFF.FWD_COIL_ACCURACY_NORMAL, type=ch['coil_type'],
-               coord_frame=coord_frame, rmag=r0ex, cosmag=cosmag)
+               accuracy=_accuracy_dict['normal'], type=ch['coil_type'],
+               coord_frame=t['to'], rmag=r0ex, cosmag=cosmag)
     return res
 
 
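The weight vector w built above encodes the EEG measurement model: a
referenced channel reads the potential at the electrode minus the potential
at its reference. A toy check with made-up potentials:

    import numpy as np

    w = np.array([1., -1.])      # electrode minus reference
    phi = np.array([12.0, 2.5])  # potentials at the two locations
    print(np.dot(w, phi))        # -> 9.5, the referenced signal
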
-def _create_coils(chs, acc=None, t=None, coil_type='meg', coilset=None):
-    """Create a set of MEG or EEG coils"""
-    if coilset is None:  # auto-read defs if not supplied
-        coilset = _read_coil_defs()
-    coils = list()
-    if coil_type == 'meg':
-        for ch in chs:
-            coils.append(_create_meg_coil(coilset, ch, acc, t))
-    elif coil_type == 'eeg':
-        for ch in chs:
-            coils.append(_create_eeg_el(ch, t))
+def _create_meg_coils(chs, acc=None, t=None, coilset=None):
+    """Create a set of MEG or EEG coils in the head coordinate frame"""
+    acc = _accuracy_dict[acc] if isinstance(acc, string_types) else acc
+    coilset = _read_coil_defs(verbose=False) if coilset is None else coilset
+    coils = [_create_meg_coil(coilset, ch, acc, t) for ch in chs]
+    return coils
+
+
+def _create_eeg_els(chs):
+    """Create a set of MEG or EEG coils in the head coordinate frame"""
+    return [_create_eeg_el(ch) for ch in chs]
+
+
+@verbose
+def _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=None):
+    """Set up a BEM for forward computation"""
+    logger.info('')
+    if isinstance(bem, string_types):
+        logger.info('Setting up the BEM model using %s...\n' % bem_extra)
+        bem = read_bem_solution(bem)
+    if not isinstance(bem, ConductorModel):
+        raise TypeError('bem must be a string or ConductorModel')
+    if bem['is_sphere']:
+        logger.info('Using the sphere model.\n')
+        if len(bem['layers']) == 0:
+            raise RuntimeError('Spherical model has zero layers')
+        if bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
+            raise RuntimeError('Spherical model is not in head coordinates')
+    else:
+        if neeg > 0 and len(bem['surfs']) == 1:
+            raise RuntimeError('Cannot use a homogeneous model in EEG '
+                               'calculations')
+        logger.info('Employing the head->MRI coordinate transform with the '
+                    'BEM model.')
+        # fwd_bem_set_head_mri_t: Set the coordinate transformation
+        bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri')
+        logger.info('BEM model %s is now set up' % op.split(bem_extra)[1])
+        logger.info('')
+    return bem
+
+
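_setup_bem follows the accept-a-filename-or-an-instance convention used
throughout this refactor (_ensure_src and _get_mri_head_t do the same). A
generic sketch of the pattern; the helper name here is illustrative, not MNE
API:

    def ensure_loaded(obj, loader, cls, what):
        """Accept either a filename or an already-loaded instance."""
        if isinstance(obj, str):
            obj = loader(obj)
        if not isinstance(obj, cls):
            raise TypeError('%s must be a string or %s'
                            % (what, cls.__name__))
        return obj
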
+@verbose
+def _prep_meg_channels(info, accurate=True, exclude=(), ignore_ref=False,
+                       elekta_defs=False, verbose=None):
+    """Prepare MEG coil definitions for forward calculation
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement information dictionary.
+    accurate : bool
+        If True (default), use 'accurate' coil definitions (more
+        integration points).
+    exclude : list of str | str
+        List of channels to exclude. If 'bads', exclude channels in
+        info['bads'].
+    ignore_ref : bool
+        If True, ignore compensation coils.
+    elekta_defs : bool
+        If True, use Elekta's coil definitions for numerical integration.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    megcoils : list of dict
+        Information for each prepped MEG coil
+    compcoils : list of dict
+        Information for each prepped MEG compensation coil
+    megnames : list of str
+        Name of each prepped MEG coil
+    meginfo : Info
+        Information subselected for just the set of MEG coils
+    """
+
+    accuracy = 'accurate' if accurate else 'normal'
+    info_extra = 'info'
+    meg_info = None
+    megnames, megcoils, compcoils = [], [], []
+
+    # Find MEG channels
+    picks = pick_types(info, meg=True, eeg=False, ref_meg=False,
+                       exclude=exclude)
+
+    # Make sure MEG coils exist
+    nmeg = len(picks)
+    if nmeg <= 0:
+        raise RuntimeError('Could not find any MEG channels')
+
+    # Get channel info and names for MEG channels
+    megchs = pick_info(info, picks)['chs']
+    megnames = [info['ch_names'][p] for p in picks]
+    logger.info('Read %3d MEG channels from %s'
+                % (len(picks), info_extra))
+
+    # Get MEG compensation channels
+    if not ignore_ref:
+        picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude)
+        ncomp = len(picks)
+        if ncomp > 0:
+            compchs = pick_info(info, picks)['chs']
+            logger.info('Read %3d MEG compensation channels from %s'
+                        % (ncomp, info_extra))
+            # We need to check to make sure these are NOT KIT refs
+            if _has_kit_refs(info, picks):
+                err = ('Cannot create forward solution with KIT reference '
+                       'channels. Consider using "ignore_ref=True" in '
+                       'calculation')
+                raise NotImplementedError(err)
     else:
-        raise RuntimeError('unknown coil type')
-    return coils, coils[0]['coord_frame']  # all get the same coord_frame
+        ncomp = 0
+
+    _print_coord_trans(info['dev_head_t'])
+
+    # Make info structure to allow making compensator later
+    ncomp_data = len(info['comps'])
+    ref_meg = not ignore_ref
+    picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=exclude)
+    meg_info = pick_info(info, picks) if nmeg > 0 else None
+
+    # Create coil descriptions with transformation to head or MRI frame
+    templates = _read_coil_defs(elekta_defs=elekta_defs)
+
+    megcoils = _create_meg_coils(megchs, accuracy, info['dev_head_t'],
+                                 templates)
+    if ncomp > 0:
+        logger.info('%d compensation data sets in %s' % (ncomp_data,
+                                                         info_extra))
+        compcoils = _create_meg_coils(compchs, 'normal', info['dev_head_t'],
+                                      templates)
+    logger.info('Head coordinate MEG coil definitions created.')
+
+    return megcoils, compcoils, megnames, meg_info
+
+
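Assuming an Info that actually contains MEG channels, the intended call
pattern for this private helper is roughly:

    # hypothetical usage; `info` is an mne Info with MEG channels
    megcoils, compcoils, megnames, meg_info = _prep_meg_channels(
        info, accurate=True, exclude=(), ignore_ref=False)
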
+@verbose
+def _prep_eeg_channels(info, exclude=(), verbose=None):
+    """Prepare EEG electrode definitions for forward calculation
+
+    Parameters
+    ----------
+    info : instance of Info
+        The measurement information dictionary.
+    exclude : list of str | str
+        List of channels to exclude. If 'bads', exclude channels in
+        info['bads'].
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    eegels : list of dict
+        Information for each prepped EEG electrode
+    eegnames : list of str
+        Name of each prepped EEG electrode
+    """
+    eegnames, eegels = [], []
+    info_extra = 'info'
+
+    # Find EEG electrodes
+    picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                       exclude=exclude)
+
+    # Make sure EEG electrodes exist
+    neeg = len(picks)
+    if neeg <= 0:
+        raise RuntimeError('Could not find any EEG channels')
+
+    # Get channel info and names for EEG channels
+    eegchs = pick_info(info, picks)['chs']
+    eegnames = [info['ch_names'][p] for p in picks]
+    logger.info('Read %3d EEG channels from %s' % (len(picks), info_extra))
+
+    # Create EEG electrode descriptions
+    eegels = _create_eeg_els(eegchs)
+    logger.info('Head coordinate coil definitions created.')
+
+    return eegels, eegnames
+
+
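The EEG helper mirrors the MEG one; under the same assumptions:

    # hypothetical usage; `info` must contain EEG channels
    eegels, eegnames = _prep_eeg_channels(info, exclude='bads')
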
+@verbose
+def _prepare_for_forward(src, mri_head_t, info, bem, mindist, n_jobs,
+                         bem_extra='', trans='', info_extra='',
+                         meg=True, eeg=True, ignore_ref=False, fname=None,
+                         overwrite=False, verbose=None):
+    """Helper to prepare for forward computation"""
+
+    # Read the source locations
+    logger.info('')
+    # let's make a copy in case we modify something
+    src = _ensure_src(src).copy()
+    nsource = sum(s['nuse'] for s in src)
+    if nsource == 0:
+        raise RuntimeError('No sources are active in these source spaces. '
+                           '"do_all" option should be used.')
+    logger.info('Read %d source spaces with a total of %d active source '
+                'locations' % (len(src), nsource))
+    # Delete some keys to clean up the source space:
+    for key in ['working_dir', 'command_line']:
+        if key in src.info:
+            del src.info[key]
+
+    # Read the MRI -> head coordinate transformation
+    logger.info('')
+    _print_coord_trans(mri_head_t)
+
+    # make a new dict with the relevant information
+    arg_list = [info_extra, trans, src, bem_extra, fname, meg, eeg,
+                mindist, overwrite, n_jobs, verbose]
+    cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list]))
+    mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
+    info = Info(nchan=info['nchan'], chs=info['chs'], comps=info['comps'],
+                ch_names=info['ch_names'], dev_head_t=info['dev_head_t'],
+                mri_file=trans, mri_id=mri_id, meas_file=info_extra,
+                meas_id=None, working_dir=os.getcwd(),
+                command_line=cmd, bads=info['bads'], mri_head_t=mri_head_t)
+    logger.info('')
+
+    megcoils, compcoils, megnames, meg_info = [], [], [], []
+    eegels, eegnames = [], []
+
+    if meg and len(pick_types(info, ref_meg=False, exclude=[])) > 0:
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, ignore_ref=ignore_ref)
+    if eeg and len(pick_types(info, meg=False, eeg=True, ref_meg=False,
+                              exclude=[])) > 0:
+        eegels, eegnames = _prep_eeg_channels(info)
+
+    # Check that some channels were found
+    if len(megcoils + eegels) == 0:
+        raise RuntimeError('No MEG or EEG channels found.')
+
+    # pick out final info
+    info = pick_info(info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False,
+                                      exclude=[]))
+
+    # Transform the source spaces into the appropriate coordinates
+    # (will either be HEAD or MRI)
+    for s in src:
+        transform_surface_to(s, 'head', mri_head_t)
+    logger.info('Source spaces are now in %s coordinates.'
+                % _coord_frame_name(s['coord_frame']))
+
+    # Prepare the BEM model
+    bem = _setup_bem(bem, bem_extra, len(eegnames), mri_head_t)
+
+    # Circumvent numerical problems by excluding points too close to the skull
+    if not bem['is_sphere']:
+        inner_skull = _bem_find_surface(bem, 'inner_skull')
+        _filter_source_spaces(inner_skull, mindist, mri_head_t, src, n_jobs)
+        logger.info('')
+
+    rr = np.concatenate([s['rr'][s['vertno']] for s in src])
+
+    # deal with free orientations:
+    source_nn = np.tile(np.eye(3), (len(rr), 1))
+    update_kwargs = dict(nchan=len(info['ch_names']), nsource=len(rr),
+                         info=info, src=src, source_nn=source_nn,
+                         source_rr=rr, surf_ori=False, mri_head_t=mri_head_t)
+    return megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, \
+        info, update_kwargs, bem
 
 
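The free-orientation bookkeeping at the end of _prepare_for_forward gives
every source location three orthonormal orientation vectors by tiling the
identity matrix; the sketch below reproduces the shapes involved:

    import numpy as np

    rr = np.zeros((2, 3))                        # two source locations
    source_nn = np.tile(np.eye(3), (len(rr), 1))
    print(source_nn.shape)                       # -> (6, 3): x, y, z each
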
 @verbose
-def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
-                          mindist=0.0, ignore_ref=False, overwrite=False,
-                          n_jobs=1, verbose=None):
+def make_forward_solution(info, trans, src, bem, fname=None, meg=True,
+                          eeg=True, mindist=0.0, ignore_ref=False,
+                          overwrite=False, n_jobs=1, verbose=None):
     """Calculate a forward solution for a subject
 
     Parameters
@@ -176,19 +443,19 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
         If str, then it should be a filename to a Raw, Epochs, or Evoked
         file with measurement information. If dict, should be an info
         dict (such as one from Raw, Epochs, or Evoked).
-    mri : dict | str
+    trans : dict | str | None
         Either a transformation filename (usually made using mne_analyze)
         or a transformation dict (usually loaded using read_trans()).
         If string, an ending of `.fif` or `.fif.gz` will be assumed to
         be in FIF format, any other ending will be assumed to be a text
         file with a 4x4 transformation matrix (like the `--trans` MNE-C
-        option).
+        option). Can be None to use the identity transform.
     src : str | instance of SourceSpaces
         If string, should be a source space filename. Can also be an
         instance of loaded or generated SourceSpaces.
-    bem : str
+    bem : dict | str
         Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
-        use.
+        use, or a loaded sphere model (dict).
     fname : str | None
         Destination forward solution filename. If None, the solution
         will not be saved.
@@ -215,6 +482,10 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     fwd : instance of Forward
         The forward solution.
 
+    See Also
+    --------
+    do_forward_solution
+
     Notes
     -----
     Some of the forward solution calculation options from the C code
@@ -223,32 +494,14 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     `do_forward_solution`.
     """
     # Currently not (sup)ported:
-    # 1. EEG Sphere model (not used much)
-    # 2. --grad option (gradients of the field, not used much)
-    # 3. --fixed option (can be computed post-hoc)
-    # 4. --mricoord option (probably not necessary)
-
-    if isinstance(mri, string_types):
-        if not op.isfile(mri):
-            raise IOError('mri file "%s" not found' % mri)
-        if op.splitext(mri)[1] in ['.fif', '.gz']:
-            mri_head_t = read_trans(mri)
-        else:
-            mri_head_t = _get_mri_head_t_from_trans_file(mri)
-    else:  # dict
-        mri_head_t = mri
-        mri = 'dict'
-
-    if not isinstance(src, string_types):
-        if not isinstance(src, SourceSpaces):
-            raise TypeError('src must be a string or SourceSpaces')
-        src_extra = 'list'
-    else:
-        src_extra = src
-        if not op.isfile(src):
-            raise IOError('Source space file "%s" not found' % src)
-    if not op.isfile(bem):
-        raise IOError('BEM file "%s" not found' % bem)
+    # 1. --grad option (gradients of the field, not used much)
+    # 2. --fixed option (can be computed post-hoc)
+    # 3. --mricoord option (probably not necessary)
+
+    # read the transformation from MRI to HEAD coordinates
+    # (could also be HEAD to MRI)
+    mri_head_t, trans = _get_mri_head_t(trans)
+    bem_extra = 'dict' if isinstance(bem, dict) else bem
     if fname is not None and op.isfile(fname) and not overwrite:
         raise IOError('file "%s" exists, consider using overwrite=True'
                       % fname)
@@ -256,224 +509,51 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
         raise TypeError('info should be a dict or string')
     if isinstance(info, string_types):
         info_extra = op.split(info)[1]
-        info_extra_long = info
         info = read_info(info, verbose=False)
     else:
         info_extra = 'info dict'
-        info_extra_long = info_extra
-    arg_list = [info_extra, mri, src_extra, bem, fname,  meg, eeg,
-                mindist, overwrite, n_jobs, verbose]
-    cmd = 'make_forward_solution(%s)' % (', '.join([str(a) for a in arg_list]))
-
-    # this could, in principle, be an option
-    coord_frame = FIFF.FIFFV_COORD_HEAD
 
     # Report the setup
-    mri_extra = mri if isinstance(mri, string_types) else 'dict'
     logger.info('Source space                 : %s' % src)
-    logger.info('MRI -> head transform source : %s' % mri_extra)
-    logger.info('Measurement data             : %s' % info_extra_long)
-    logger.info('BEM model                    : %s' % bem)
-    logger.info('Accurate field computations')
+    logger.info('MRI -> head transform source : %s' % trans)
+    logger.info('Measurement data             : %s' % info_extra)
+    if isinstance(bem, dict) and bem['is_sphere']:
+        logger.info('Sphere model                 : origin at %s mm'
+                    % (bem['r0'],))
+        logger.info('Standard field computations')
+    else:
+        logger.info('BEM model                    : %s' % bem_extra)
+        logger.info('Accurate field computations')
     logger.info('Do computations in %s coordinates',
-                _coord_frame_name(coord_frame))
+                _coord_frame_name(FIFF.FIFFV_COORD_HEAD))
     logger.info('Free source orientations')
     logger.info('Destination for the solution : %s' % fname)
 
-    # Read the source locations
-    logger.info('')
-    if isinstance(src, string_types):
-        logger.info('Reading %s...' % src)
-        src = read_source_spaces(src, verbose=False)
-    else:
-        # let's make a copy in case we modify something
-        src = src.copy()
-    nsource = sum(s['nuse'] for s in src)
-    if nsource == 0:
-        raise RuntimeError('No sources are active in these source spaces. '
-                           '"do_all" option should be used.')
-    logger.info('Read %d source spaces a total of %d active source locations'
-                % (len(src), nsource))
-
-    # Read the MRI -> head coordinate transformation
-    logger.info('')
-
-    # it's actually usually a head->MRI transform, so we probably need to
-    # invert it
-    if mri_head_t['from'] == FIFF.FIFFV_COORD_HEAD:
-        mri_head_t = invert_transform(mri_head_t)
-    if not (mri_head_t['from'] == FIFF.FIFFV_COORD_MRI and
-            mri_head_t['to'] == FIFF.FIFFV_COORD_HEAD):
-        raise RuntimeError('Incorrect MRI transform provided')
-    _print_coord_trans(mri_head_t)
-
-    # make a new dict with the relevant information
-    mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
-    info = dict(nchan=info['nchan'], chs=info['chs'], comps=info['comps'],
-                ch_names=info['ch_names'], dev_head_t=info['dev_head_t'],
-                mri_file=mri_extra, mri_id=mri_id, meas_file=info_extra_long,
-                meas_id=None, working_dir=os.getcwd(),
-                command_line=cmd, bads=info['bads'])
-    meg_head_t = info['dev_head_t']
-    logger.info('')
-
-    # MEG channels
-    megnames = None
-    if meg:
-        picks = pick_types(info, meg=True, eeg=False, ref_meg=False,
-                           exclude=[])
-        nmeg = len(picks)
-        if nmeg > 0:
-            megchs = pick_info(info, picks)['chs']
-            megnames = [info['ch_names'][p] for p in picks]
-            logger.info('Read %3d MEG channels from %s'
-                        % (len(picks), info_extra))
-
-        # comp channels
-        if not ignore_ref:
-            picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
-            ncomp = len(picks)
-            if (ncomp > 0):
-                compchs = pick_info(info, picks)['chs']
-                logger.info('Read %3d MEG compensation channels from %s'
-                            % (ncomp, info_extra))
-                # We need to check to make sure these are NOT KIT refs
-                if _has_kit_refs(info, picks):
-                    err = ('Cannot create forward solution with KIT '
-                           'reference channels. Consider using '
-                           '"ignore_ref=True" in calculation')
-                    raise NotImplementedError(err)
-            _print_coord_trans(meg_head_t)
-            # make info structure to allow making compensator later
-        else:
-            ncomp = 0
-        ncomp_data = len(info['comps'])
-        ref_meg = True if not ignore_ref else False
-        picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=[])
-        meg_info = pick_info(info, picks)
-    else:
-        logger.info('MEG not requested. MEG channels omitted.')
-        nmeg = 0
-        meg_info = None
-
-    # EEG channels
-    eegnames = None
-    if eeg:
-        picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
-                           exclude=[])
-        neeg = len(picks)
-        if neeg > 0:
-            eegchs = pick_info(info, picks)['chs']
-            eegnames = [info['ch_names'][p] for p in picks]
-            logger.info('Read %3d EEG channels from %s'
-                        % (len(picks), info_extra))
-    else:
-        neeg = 0
-        logger.info('EEG not requested. EEG channels omitted.')
-
-    if neeg <= 0 and nmeg <= 0:
-        raise RuntimeError('Could not find any MEG or EEG channels')
-
-    # Create coil descriptions with transformation to head or MRI frame
-    templates = _read_coil_defs()
-    if nmeg > 0 and ncomp > 0:  # Compensation channel information
-        logger.info('%d compensation data sets in %s'
-                    % (ncomp_data, info_extra))
-
-    meg_xform = meg_head_t
-    extra_str = 'Head'
-
-    megcoils, megcf, compcoils, compcf = None, None, None, None
-    if nmeg > 0:
-        megcoils, megcf = _create_coils(megchs,
-                                        FIFF.FWD_COIL_ACCURACY_ACCURATE,
-                                        meg_xform, coil_type='meg',
-                                        coilset=templates)
-        if ncomp > 0:
-            compcoils, compcf = _create_coils(compchs,
-                                              FIFF.FWD_COIL_ACCURACY_NORMAL,
-                                              meg_xform, coil_type='meg',
-                                              coilset=templates)
-    eegels = None
-    if neeg > 0:
-        eegels, _ = _create_coils(eegchs, coil_type='eeg')
-    logger.info('%s coordinate coil definitions created.' % extra_str)
-
-    # Transform the source spaces into the appropriate coordinates
-    for s in src:
-        transform_surface_to(s, coord_frame, mri_head_t)
-    logger.info('Source spaces are now in %s coordinates.'
-                % _coord_frame_name(coord_frame))
-
-    # Prepare the BEM model
-    logger.info('')
-    logger.info('Setting up the BEM model using %s...\n' % bem)
-    bem_name = bem
-    bem = read_bem_solution(bem)
-    if neeg > 0 and len(bem['surfs']) == 1:
-        raise RuntimeError('Cannot use a homogeneous model in EEG '
-                           'calculations')
-    logger.info('Employing the head->MRI coordinate transform with the '
-                'BEM model.')
-    # fwd_bem_set_head_mri_t: Set the coordinate transformation
-    to, fro = mri_head_t['to'], mri_head_t['from']
-    if fro == FIFF.FIFFV_COORD_HEAD and to == FIFF.FIFFV_COORD_MRI:
-        bem['head_mri_t'] = mri_head_t
-    elif fro == FIFF.FIFFV_COORD_MRI and to == FIFF.FIFFV_COORD_HEAD:
-        bem['head_mri_t'] = invert_transform(mri_head_t)
-    else:
-        raise RuntimeError('Improper coordinate transform')
-    logger.info('BEM model %s is now set up' % op.split(bem_name)[1])
-    logger.info('')
-
-    # Circumvent numerical problems by excluding points too close to the skull
-    idx = np.where(np.array([s['id'] for s in bem['surfs']])
-                   == FIFF.FIFFV_BEM_SURF_ID_BRAIN)[0]
-    if len(idx) != 1:
-        raise RuntimeError('BEM model does not have the inner skull '
-                           'triangulation')
-    _filter_source_spaces(bem['surfs'][idx[0]], mindist, mri_head_t, src,
-                          n_jobs)
-    logger.info('')
+    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
+        update_kwargs, bem = _prepare_for_forward(
+            src, mri_head_t, info, bem, mindist, n_jobs, bem_extra, trans,
+            info_extra, meg, eeg, ignore_ref, fname, overwrite)
+    del (src, mri_head_t, trans, info_extra, bem_extra, mindist,
+         meg, eeg, ignore_ref)
 
     # Time to do the heavy lifting: MEG first, then EEG
     coil_types = ['meg', 'eeg']
     coils = [megcoils, eegels]
-    cfs = [megcf, None]
     ccoils = [compcoils, None]
-    ccfs = [compcf, None]
     infos = [meg_info, None]
-    megfwd, eegfwd = _compute_forwards(src, bem, coils, cfs, ccoils, ccfs,
+    megfwd, eegfwd = _compute_forwards(rr, bem, coils, ccoils,
                                        infos, coil_types, n_jobs)
 
-    # merge forwards into one (creates two Forward objects)
-    megfwd = _to_forward_dict(megfwd, None, megnames, coord_frame,
-                              FIFF.FIFFV_MNE_FREE_ORI)
-    eegfwd = _to_forward_dict(eegfwd, None, eegnames, coord_frame,
-                              FIFF.FIFFV_MNE_FREE_ORI)
-    fwd = _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=False)
+    # merge forwards
+    fwd = _merge_meg_eeg_fwds(_to_forward_dict(megfwd, megnames),
+                              _to_forward_dict(eegfwd, eegnames),
+                              verbose=False)
     logger.info('')
 
-    # pick out final dict info
-    picks = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])
-    info = pick_info(info, picks)
-    source_rr = np.concatenate([s['rr'][s['vertno']] for s in src])
-    # deal with free orientations:
-    nsource = fwd['sol']['data'].shape[1] // 3
-    source_nn = np.tile(np.eye(3), (nsource, 1))
-
     # Don't transform the source spaces back into MRI coordinates (which is
     # done in the C code) because mne-python assumes forward solution source
-    # spaces are in head coords. We will delete some keys to clean up the
-    # source space, though:
-    for key in ['working_dir', 'command_line']:
-        if key in src.info:
-            del src.info[key]
-    fwd.update(dict(nchan=fwd['sol']['data'].shape[0], nsource=nsource,
-                    info=info, src=src, source_nn=source_nn,
-                    source_rr=source_rr, surf_ori=False,
-                    mri_head_t=mri_head_t))
-    fwd['info']['mri_head_t'] = mri_head_t
+    # spaces are in head coords.
+    fwd.update(**update_kwargs)
     if fname is not None:
         logger.info('writing %s...', fname)
         write_forward_solution(fname, fwd, overwrite, verbose=False)
@@ -482,19 +562,23 @@ def make_forward_solution(info, mri, src, bem, fname=None, meg=True, eeg=True,
     return fwd
 
 
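Put together, the refactored entry point is called as below. The file names
are placeholders for illustration; only the keyword names come from the
signature above:

    # hypothetical file names
    fwd = make_forward_solution('sample_raw.fif', trans='sample-trans.fif',
                                src='sample-oct-6-src.fif',
                                bem='sample-5120-bem-sol.fif',
                                meg=True, eeg=True, mindist=5.0, n_jobs=2)
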
-def _to_forward_dict(fwd, fwd_grad, names, coord_frame, source_ori):
+def _to_forward_dict(fwd, names, fwd_grad=None,
+                     coord_frame=FIFF.FIFFV_COORD_HEAD,
+                     source_ori=FIFF.FIFFV_MNE_FREE_ORI):
     """Convert forward solution matrices to dicts"""
-    if fwd is not None:
-        sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0],
-                   row_names=names, col_names=[])
-        fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
-                      coord_frame=coord_frame, sol_grad=None,
-                      nchan=sol['nrow'], _orig_source_ori=source_ori,
-                      _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
-        if fwd_grad is not None:
-            sol_grad = dict(data=fwd_grad.T, nrow=fwd_grad.shape[1],
-                            ncol=fwd_grad.shape[0], row_names=names,
-                            col_names=[])
-            fwd.update(dict(sol_grad=sol_grad),
-                       _orig_sol_grad=sol_grad['data'].copy())
+    assert names is not None
+    if len(fwd) == 0:
+        return None
+    sol = dict(data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0],
+               row_names=names, col_names=[])
+    fwd = Forward(sol=sol, source_ori=source_ori, nsource=sol['ncol'],
+                  coord_frame=coord_frame, sol_grad=None,
+                  nchan=sol['nrow'], _orig_source_ori=source_ori,
+                  _orig_sol=sol['data'].copy(), _orig_sol_grad=None)
+    if fwd_grad is not None:
+        sol_grad = dict(data=fwd_grad.T, nrow=fwd_grad.shape[1],
+                        ncol=fwd_grad.shape[0], row_names=names,
+                        col_names=[])
+        fwd.update(dict(sol_grad=sol_grad),
+                   _orig_sol_grad=sol_grad['data'].copy())
     return fwd
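_to_forward_dict wraps a raw gain matrix of shape (n_source_components,
n_channels) into the transposed named-matrix layout that Forward expects; a
synthetic-shape sketch:

    import numpy as np

    names = ['MEG 0111', 'MEG 0112']       # hypothetical channel names
    gain = np.random.rand(6, len(names))   # (3 * n_sources, n_channels)
    sol = dict(data=gain.T, nrow=gain.shape[1], ncol=gain.shape[0],
               row_names=names, col_names=[])
    # sol['data'] has shape (n_channels, 3 * n_sources)
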
diff --git a/mne/forward/forward.py b/mne/forward/forward.py
index 99a417b..c937c5b 100644
--- a/mne/forward/forward.py
+++ b/mne/forward/forward.py
@@ -18,6 +18,8 @@ import os
 from os import path as op
 import tempfile
 
+from ..fixes import sparse_block_diag
+from ..io import RawArray
 from ..io.constants import FIFF
 from ..io.open import fiff_open
 from ..io.tree import dir_tree_find
@@ -31,14 +33,15 @@ from ..io.write import (write_int, start_block, end_block,
                         write_coord_trans, write_ch_info, write_name_list,
                         write_string, start_file, end_file, write_id)
 from ..io.base import _BaseRaw
-from ..evoked import Evoked, write_evokeds
+from ..evoked import Evoked, write_evokeds, EvokedArray
 from ..epochs import Epochs
-from ..source_space import (read_source_spaces_from_tree,
+from ..source_space import (_read_source_spaces_from_tree,
                             find_source_space_hemi,
                             _write_source_spaces_to_fid)
+from ..source_estimate import VolSourceEstimate
 from ..transforms import (transform_surface_to, invert_transform,
                           write_trans)
-from ..utils import (_check_fname, get_subjects_dir, has_command_line_tools,
+from ..utils import (_check_fname, get_subjects_dir, has_mne_c,
                      run_subprocess, check_fname, logger, verbose)
 
 
@@ -56,15 +59,28 @@ class Forward(dict):
         nchan = len(pick_types(self['info'], meg=False, eeg=True))
         entr += ' | ' + 'EEG channels: %d' % nchan
 
-        if self['src'][0]['type'] == 'surf':
+        src_types = np.array([src['type'] for src in self['src']])
+        if (src_types == 'surf').all():
             entr += (' | Source space: Surface with %d vertices'
                      % self['nsource'])
-        elif self['src'][0]['type'] == 'vol':
+        elif (src_types == 'vol').all():
             entr += (' | Source space: Volume with %d grid points'
                      % self['nsource'])
-        elif self['src'][0]['type'] == 'discrete':
+        elif (src_types == 'discrete').all():
             entr += (' | Source space: Discrete with %d dipoles'
                      % self['nsource'])
+        else:
+            count_string = ''
+            if (src_types == 'surf').any():
+                count_string += '%d surface, ' % (src_types == 'surf').sum()
+            if (src_types == 'vol').any():
+                count_string += '%d volume, ' % (src_types == 'vol').sum()
+            if (src_types == 'discrete').any():
+                count_string += '%d discrete, ' \
+                                % (src_types == 'discrete').sum()
+            count_string = count_string.rstrip(', ')
+            entr += (' | Source space: Mixed (%s) with %d vertices'
+                     % (count_string, self['nsource']))
 
         if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
             entr += (' | Source orientation: Unknown')
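Given the types below, the mixed branch above would render
'Mixed (2 surface, 1 volume)'. A standalone sketch of the counting logic:

    import numpy as np

    src_types = np.array(['surf', 'vol', 'surf'])
    parts = []
    for name, label in (('surf', 'surface'), ('vol', 'volume'),
                        ('discrete', 'discrete')):
        n = (src_types == name).sum()
        if n:
            parts.append('%d %s' % (n, label))
    print('Mixed (%s)' % ', '.join(parts))  # -> Mixed (2 surface, 1 volume)
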
@@ -195,45 +211,37 @@ def _inv_block_diag(A, n):
     return bd
 
 
+def _get_tag_int(fid, node, name, id_):
+    """Helper to check we have an appropriate tag"""
+    tag = find_tag(fid, node, id_)
+    if tag is None:
+        fid.close()
+        raise ValueError(name + ' tag not found')
+    return int(tag.data)
+
+
 def _read_one(fid, node):
     """Read all interesting stuff for one forward solution
     """
+    # This function assumes the fid is open as a context manager
     if node is None:
         return None
 
     one = Forward()
-
-    tag = find_tag(fid, node, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
-    if tag is None:
-        fid.close()
-        raise ValueError('Source orientation tag not found')
-    one['source_ori'] = int(tag.data)
-
-    tag = find_tag(fid, node, FIFF.FIFF_MNE_COORD_FRAME)
-    if tag is None:
-        fid.close()
-        raise ValueError('Coordinate frame tag not found')
-    one['coord_frame'] = int(tag.data)
-
-    tag = find_tag(fid, node, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
-    if tag is None:
-        fid.close()
-        raise ValueError('Number of sources not found')
-    one['nsource'] = int(tag.data)
-
-    tag = find_tag(fid, node, FIFF.FIFF_NCHAN)
-    if tag is None:
-        fid.close()
-        raise ValueError('Number of channels not found')
-    one['nchan'] = int(tag.data)
-
+    one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',
+                                     FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+    one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',
+                                      FIFF.FIFF_MNE_COORD_FRAME)
+    one['nsource'] = _get_tag_int(fid, node, 'Number of sources',
+                                  FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+    one['nchan'] = _get_tag_int(fid, node, 'Number of channels',
+                                FIFF.FIFF_NCHAN)
     try:
         one['sol'] = _read_named_matrix(fid, node,
                                         FIFF.FIFF_MNE_FORWARD_SOLUTION)
         one['sol'] = _transpose_named_matrix(one['sol'], copy=False)
         one['_orig_sol'] = one['sol']['data'].copy()
-    except:
-        fid.close()
+    except Exception:
         logger.error('Forward solution data not found')
         raise
 
@@ -242,27 +250,25 @@ def _read_one(fid, node):
         one['sol_grad'] = _read_named_matrix(fid, node, fwd_type)
         one['sol_grad'] = _transpose_named_matrix(one['sol_grad'], copy=False)
         one['_orig_sol_grad'] = one['sol_grad']['data'].copy()
-    except:
+    except Exception:
         one['sol_grad'] = None
 
     if one['sol']['data'].shape[0] != one['nchan'] or \
             (one['sol']['data'].shape[1] != one['nsource'] and
              one['sol']['data'].shape[1] != 3 * one['nsource']):
-        fid.close()
         raise ValueError('Forward solution matrix has wrong dimensions')
 
     if one['sol_grad'] is not None:
         if one['sol_grad']['data'].shape[0] != one['nchan'] or \
                 (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and
                  one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):
-            fid.close()
             raise ValueError('Forward solution gradient matrix has '
                              'wrong dimensions')
 
     return one
 
 
-def read_forward_meas_info(tree, fid):
+def _read_forward_meas_info(tree, fid):
     """Read light measurement info from forward operator
 
     Parameters
@@ -277,12 +283,12 @@ def read_forward_meas_info(tree, fid):
     info : instance of mne.io.meas_info.Info
         The measurement info.
     """
+    # This function assumes fid is being used as a context manager
     info = Info()
 
     # Information from the MRI file
     parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
     if len(parent_mri) == 0:
-        fid.close()
         raise ValueError('No parent MEG information found in operator')
     parent_mri = parent_mri[0]
 
@@ -294,7 +300,6 @@ def read_forward_meas_info(tree, fid):
     # Information from the MEG file
     parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
     if len(parent_meg) == 0:
-        fid.close()
         raise ValueError('No parent MEG information found in operator')
     parent_meg = parent_meg[0]
 
@@ -323,30 +328,33 @@ def read_forward_meas_info(tree, fid):
     coord_device = FIFF.FIFFV_COORD_DEVICE
     coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
     if tag is None:
-        fid.close()
         raise ValueError('MRI/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_mri and cand['to'] == coord_head:
+        info['mri_head_t'] = cand
     else:
-        cand = tag.data
-        if cand['from'] == coord_mri and cand['to'] == coord_head:
-            info['mri_head_t'] = cand
-        else:
-            raise ValueError('MRI/head coordinate transformation not found')
+        raise ValueError('MRI/head coordinate transformation not found')
 
     #   Get the MEG device <-> head coordinate transformation
     tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
     if tag is None:
-        fid.close()
         raise ValueError('MEG/head coordinate transformation not found')
+    cand = tag.data
+    if cand['from'] == coord_device and cand['to'] == coord_head:
+        info['dev_head_t'] = cand
+    elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
+        info['ctf_head_t'] = cand
     else:
-        cand = tag.data
-        if cand['from'] == coord_device and cand['to'] == coord_head:
-            info['dev_head_t'] = cand
-        elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:
-            info['ctf_head_t'] = cand
-        else:
-            raise ValueError('MEG/head coordinate transformation not found')
+        raise ValueError('MEG/head coordinate transformation not found')
 
     info['bads'] = read_bad_channels(fid, parent_meg)
+    # clean up our bads list; old versions could contain non-existent channels
+    info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]
+
+    # Check if a custom reference has been applied
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_CUSTOM_REF)
+    info['custom_ref_applied'] = bool(tag.data) if tag is not None else False
+    info._check_consistency()
     return info
 
 
@@ -418,123 +426,115 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
     -------
     fwd : instance of Forward
         The forward solution.
+
+    See Also
+    --------
+    write_forward_solution, make_forward_solution
     """
     check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
 
     #   Open the file, create directory
     logger.info('Reading forward solution from %s...' % fname)
-    fid, tree, _ = fiff_open(fname)
-
-    #   Find all forward solutions
-    fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
-    if len(fwds) == 0:
-        fid.close()
-        raise ValueError('No forward solutions in %s' % fname)
-
-    #   Parent MRI data
-    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
-    if len(parent_mri) == 0:
-        fid.close()
-        raise ValueError('No parent MRI information in %s' % fname)
-    parent_mri = parent_mri[0]
-
-    try:
-        src = read_source_spaces_from_tree(fid, tree, add_geom=False)
-    except Exception as inst:
-        fid.close()
-        raise ValueError('Could not read the source spaces (%s)' % inst)
-
-    for s in src:
-        s['id'] = find_source_space_hemi(s)
-
-    fwd = None
-
-    #   Locate and read the forward solutions
-    megnode = None
-    eegnode = None
-    for k in range(len(fwds)):
-        tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
-        if tag is None:
-            fid.close()
-            raise ValueError('Methods not listed for one of the forward '
-                             'solutions')
-
-        if tag.data == FIFF.FIFFV_MNE_MEG:
-            megnode = fwds[k]
-        elif tag.data == FIFF.FIFFV_MNE_EEG:
-            eegnode = fwds[k]
-
-    megfwd = _read_one(fid, megnode)
-    if megfwd is not None:
-        if is_fixed_orient(megfwd):
-            ori = 'fixed'
-        else:
-            ori = 'free'
-        logger.info('    Read MEG forward solution (%d sources, %d channels, '
-                    '%s orientations)' % (megfwd['nsource'], megfwd['nchan'],
-                                          ori))
-
-    eegfwd = _read_one(fid, eegnode)
-    if eegfwd is not None:
-        if is_fixed_orient(eegfwd):
-            ori = 'fixed'
-        else:
-            ori = 'free'
-        logger.info('    Read EEG forward solution (%d sources, %d channels, '
-                    '%s orientations)' % (eegfwd['nsource'], eegfwd['nchan'],
-                                          ori))
+    f, tree, _ = fiff_open(fname)
+    with f as fid:
+        #   Find all forward solutions
+        fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)
+        if len(fwds) == 0:
+            raise ValueError('No forward solutions in %s' % fname)
+
+        #   Parent MRI data
+        parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            raise ValueError('No parent MRI information in %s' % fname)
+        parent_mri = parent_mri[0]
+
+        src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)
+        for s in src:
+            s['id'] = find_source_space_hemi(s)
+
+        fwd = None
+
+        #   Locate and read the forward solutions
+        megnode = None
+        eegnode = None
+        for k in range(len(fwds)):
+            tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)
+            if tag is None:
+                raise ValueError('Methods not listed for one of the forward '
+                                 'solutions')
+
+            if tag.data == FIFF.FIFFV_MNE_MEG:
+                megnode = fwds[k]
+            elif tag.data == FIFF.FIFFV_MNE_EEG:
+                eegnode = fwds[k]
+
+        megfwd = _read_one(fid, megnode)
+        if megfwd is not None:
+            if is_fixed_orient(megfwd):
+                ori = 'fixed'
+            else:
+                ori = 'free'
+            logger.info('    Read MEG forward solution (%d sources, '
+                        '%d channels, %s orientations)'
+                        % (megfwd['nsource'], megfwd['nchan'], ori))
+
+        eegfwd = _read_one(fid, eegnode)
+        if eegfwd is not None:
+            if is_fixed_orient(eegfwd):
+                ori = 'fixed'
+            else:
+                ori = 'free'
+            logger.info('    Read EEG forward solution (%d sources, '
+                        '%d channels, %s orientations)'
+                        % (eegfwd['nsource'], eegfwd['nchan'], ori))
 
-    #   Merge the MEG and EEG solutions together
-    try:
         fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)
-    except:
-        fid.close()
-        raise
 
-    #   Get the MRI <-> head coordinate transformation
-    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
-    if tag is None:
-        fid.close()
-        raise ValueError('MRI/head coordinate transformation not found')
-    else:
+        #   Get the MRI <-> head coordinate transformation
+        tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+        if tag is None:
+            raise ValueError('MRI/head coordinate transformation not found')
         mri_head_t = tag.data
         if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
                 mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
             mri_head_t = invert_transform(mri_head_t)
-            if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI
-                    or mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
+            if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or
+                    mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):
                 fid.close()
                 raise ValueError('MRI/head coordinate transformation not '
                                  'found')
-    fwd['mri_head_t'] = mri_head_t
-
-    #
-    # get parent MEG info
-    #
-    fwd['info'] = read_forward_meas_info(tree, fid)
-
-    # MNE environment
-    parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
-    if len(parent_env) > 0:
-        parent_env = parent_env[0]
-        tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
-        if tag is not None:
-            fwd['info']['working_dir'] = tag.data
-        tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
-        if tag is not None:
-            fwd['info']['command_line'] = tag.data
-
-    fid.close()
+        fwd['mri_head_t'] = mri_head_t
+
+        #
+        # get parent MEG info
+        #
+        fwd['info'] = _read_forward_meas_info(tree, fid)
+
+        # MNE environment
+        parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
+        if len(parent_env) > 0:
+            parent_env = parent_env[0]
+            tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)
+            if tag is not None:
+                fwd['info']['working_dir'] = tag.data
+            tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)
+            if tag is not None:
+                fwd['info']['command_line'] = tag.data
 
     #   Transform the source spaces to the correct coordinate frame
     #   if necessary
 
-    if (fwd['coord_frame'] != FIFF.FIFFV_COORD_MRI and
-            fwd['coord_frame'] != FIFF.FIFFV_COORD_HEAD):
+    # Make sure forward solution is in either the MRI or HEAD coordinate frame
+    if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):
         raise ValueError('Only forward solutions computed in MRI or head '
                          'coordinates are acceptable')
 
     nuse = 0
+
+    # Transform each source space to the HEAD or MRI coordinate frame,
+    # depending on the coordinate frame of the forward solution
+    # NOTE: the function transform_surface_to will also work on discrete and
+    # volume sources
     for s in src:
         try:
             s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)
@@ -543,6 +543,7 @@ def read_forward_solution(fname, force_fixed=False, surf_ori=False,
 
         nuse += s['nuse']
 
+    # Make sure the number of sources matches after transformation
     if nuse != fwd['nsource']:
         raise ValueError('Source spaces do not match the forward solution.')
 
@@ -609,14 +610,14 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
             fix_rot = _block_diag(fwd['source_nn'].T, 1)
             # newer versions of numpy require explicit casting here, so *= no
             # longer works
-            fwd['sol']['data'] = (fwd['_orig_sol']
-                                  * fix_rot).astype('float32')
+            fwd['sol']['data'] = (fwd['_orig_sol'] *
+                                  fix_rot).astype('float32')
             fwd['sol']['ncol'] = fwd['nsource']
             fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
 
             if fwd['sol_grad'] is not None:
-                fwd['sol_grad']['data'] = np.dot(fwd['_orig_sol_grad'],
-                                                 np.kron(fix_rot, np.eye(3)))
+                x = sparse_block_diag([fix_rot] * 3)
+                fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x  # dot prod
                 fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
             logger.info('    [done]')
         fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI
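The kron-with-identity products of the old code become multiplications by a
sparse block-diagonal matrix; sparse_block_diag from ..fixes appears to be a
compatibility wrapper for scipy.sparse.block_diag, which the check below uses
directly. Note that block_diag([R] * 3) equals kron(eye(3), R), and that a
dense * sparse product dispatches to sparse matrix multiplication:

    import numpy as np
    from scipy import sparse

    R = np.arange(6.).reshape(2, 3)
    B = sparse.block_diag([R] * 3)                 # (6, 9), mostly zeros
    assert np.allclose(B.toarray(), np.kron(np.eye(3), R))

    X = np.random.rand(4, 6)
    Y = X * B        # dense * sparse -> matrix product, shape (4, 9)
    assert np.allclose(Y, X.dot(B.toarray()))
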
@@ -658,8 +659,8 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False,
         fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot
         fwd['sol']['ncol'] = 3 * fwd['nsource']
         if fwd['sol_grad'] is not None:
-            fwd['sol_grad'] = np.dot(fwd['_orig_sol_grad'] *
-                                     np.kron(surf_rot, np.eye(3)))
+            x = sparse_block_diag([surf_rot] * 3)
+            fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x  # dot prod
             fwd['sol_grad']['ncol'] = 3 * fwd['nsource']
         logger.info('[done]')
         fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI
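
The two hunks above swap the dense np.kron(rot, np.eye(3)) product for a
sparse block-diagonal multiply; the second of them also repairs a
single-argument np.dot call that would have raised a TypeError. A toy
sketch of the mechanics, assuming sparse_block_diag behaves like
scipy.sparse.block_diag (shapes are illustrative only):

    import numpy as np
    from scipy.sparse import block_diag

    rot = np.array([[0., 1.], [1., 0.]])  # stand-in for fix_rot/surf_rot
    x = block_diag([rot] * 3)             # sparse (6, 6) block diagonal
    grad = np.arange(12.).reshape(2, 6)   # stand-in for _orig_sol_grad
    out = grad * x                        # dense * sparse acts as a dot
    print(out.shape)                      # product here -> (2, 6)
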
@@ -694,6 +695,10 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
         If True, overwrite destination file (if it exists).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_forward_solution
     """
     check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz'))
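    # With reader and writer now cross-referenced, a hedged round-trip
    # sketch (file names are illustrative, following the '-fwd.fif'
    # suffix that check_fname enforces):
    #
    #     from mne import read_forward_solution, write_forward_solution
    #     fwd = read_forward_solution('sample_audvis-meg-oct-6-fwd.fif')
    #     write_forward_solution('sample_audvis-copy-fwd.fif', fwd,
    #                            overwrite=True)
    #     fwd_read = read_forward_solution('sample_audvis-copy-fwd.fif')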
 
@@ -722,6 +727,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])
     if fwd['info']['mri_id'] is not None:
         write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])
+    # store the MRI to HEAD transform in MRI file
     write_coord_trans(fid, fwd['info']['mri_head_t'])
     end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
 
@@ -733,6 +739,8 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
     for s in fwd['src']:
         s = deepcopy(s)
         try:
+            # return the source space to its original coordinate
+            # frame (usually MRI)
             s = transform_surface_to(s, fwd['mri_head_t']['from'],
                                      fwd['mri_head_t'])
         except Exception as inst:
@@ -761,7 +769,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
         inv_rot = _inv_block_diag(fwd['source_nn'].T, 3)
         sol = sol * inv_rot
         if sol_grad is not None:
-            sol_grad = np.dot(sol_grad * np.kron(inv_rot, np.eye(3)))
+            sol_grad = sol_grad * sparse_block_diag([inv_rot] * 3)  # dot prod
 
     #
     # MEG forward solution
@@ -788,7 +796,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
         write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)
         if sol_grad is not None:
             meg_solution_grad = dict(data=sol_grad[picks_meg],
-                                     nrow=n_meg, ncol=n_col,
+                                     nrow=n_meg, ncol=n_col * 3,
                                      row_names=row_names_meg, col_names=[])
             meg_solution_grad = _transpose_named_matrix(meg_solution_grad,
                                                         copy=False)
@@ -812,9 +820,9 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None):
         write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)
         if sol_grad is not None:
             eeg_solution_grad = dict(data=sol_grad[picks_eeg],
-                                     nrow=n_eeg, ncol=n_col,
+                                     nrow=n_eeg, ncol=n_col * 3,
                                      row_names=row_names_eeg, col_names=[])
-            meg_solution_grad = _transpose_named_matrix(eeg_solution_grad,
+            eeg_solution_grad = _transpose_named_matrix(eeg_solution_grad,
                                                         copy=False)
             write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,
                                eeg_solution_grad)
@@ -857,6 +865,7 @@ def write_forward_meas_info(fid, info):
     info : instance of mne.io.meas_info.Info
         The measurement info.
     """
+    info._check_consistency()
     #
     # Information from the MEG file
     #
@@ -864,6 +873,7 @@ def write_forward_meas_info(fid, info):
     write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])
     if info['meas_id'] is not None:
         write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
+    # get the DEVICE to HEAD transform, falling back to CTF head to HEAD
     meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))
     if meg_head_t is None:
         fid.close()
@@ -1016,15 +1026,21 @@ def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
 def _stc_src_sel(src, stc):
     """ Select the vertex indices of a source space using a source estimate
     """
-    src_sel_lh = np.intersect1d(src[0]['vertno'], stc.vertno[0])
-    src_sel_lh = np.searchsorted(src[0]['vertno'], src_sel_lh)
-
-    src_sel_rh = np.intersect1d(src[1]['vertno'], stc.vertno[1])
-    src_sel_rh = (np.searchsorted(src[1]['vertno'], src_sel_rh)
-                  + len(src[0]['vertno']))
-
-    src_sel = np.r_[src_sel_lh, src_sel_rh]
-
+    if isinstance(stc, VolSourceEstimate):
+        vertices = [stc.vertices]
+    else:
+        vertices = stc.vertices
+    if not len(src) == len(vertices):
+        raise RuntimeError('Mismatch between number of source spaces (%s) and '
+                           'STC vertices (%s)' % (len(src), len(vertices)))
+    src_sels = []
+    offset = 0
+    for s, v in zip(src, vertices):
+        src_sel = np.intersect1d(s['vertno'], v)
+        src_sel = np.searchsorted(s['vertno'], src_sel)
+        src_sels.append(src_sel + offset)
+        offset += len(s['vertno'])
+    src_sel = np.concatenate(src_sels)
     return src_sel
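
The rewrite above generalizes the old hard-coded left/right hemisphere
pair to any number of source spaces by carrying a running offset. A
self-contained sketch of the indexing idea with toy vertex numbers:

    import numpy as np

    vertno = [np.array([2, 5, 9]), np.array([1, 4])]  # per source space
    stc_verts = [np.array([5, 9]), np.array([4])]     # per STC block

    sels, offset = [], 0
    for v_src, v_stc in zip(vertno, stc_verts):
        sel = np.searchsorted(v_src, np.intersect1d(v_src, v_stc))
        sels.append(sel + offset)  # shift into the concatenated index space
        offset += len(v_src)
    print(np.concatenate(sels))    # [1 2 4]
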
 
 
@@ -1074,7 +1090,10 @@ def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
                       'currents are used.' % (1e9 * max_cur))
 
     src_sel = _stc_src_sel(fwd['src'], stc)
-    n_src = sum([len(v) for v in stc.vertno])
+    if isinstance(stc, VolSourceEstimate):
+        n_src = len(stc.vertices)
+    else:
+        n_src = sum([len(v) for v in stc.vertices])
     if len(src_sel) != n_src:
         raise RuntimeError('Only %i of %i SourceEstimate vertices found in '
                            'fwd' % (len(src_sel), n_src))
@@ -1091,8 +1110,8 @@ def _apply_forward(fwd, stc, start=None, stop=None, verbose=None):
 
 
 @verbose
-def apply_forward(fwd, stc, evoked_template, start=None, stop=None,
-                  verbose=None):
+def apply_forward(fwd, stc, info=None, start=None, stop=None,
+                  verbose=None, evoked_template=None):
     """
     Project source space currents to sensor space using a forward operator.
 
@@ -1108,18 +1127,20 @@ def apply_forward(fwd, stc, evoked_template, start=None, stop=None,
 
     Parameters
     ----------
-    forward : dict
+    fwd : dict
         Forward operator to use. Has to be fixed-orientation.
     stc : SourceEstimate
         The source estimate from which the sensor space data is computed.
-    evoked_template : Evoked object
-        Evoked object used as template to generate the output argument.
+    info : instance of mne.io.meas_info.Info
+        Measurement info to generate the evoked.
     start : int, optional
         Index of the first time sample (index, not time in seconds).
     stop : int, optional
         Index of the first time sample not to include (index, not time in
         seconds).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+    evoked_template : Evoked object (deprecated)
+        Evoked object used as template to generate the output argument.
 
     Returns
     -------
@@ -1130,35 +1151,47 @@ def apply_forward(fwd, stc, evoked_template, start=None, stop=None,
     --------
     apply_forward_raw: Compute sensor space data and return a Raw object.
     """
+    if evoked_template is None and info is None:
+        raise ValueError('You have to provide the info parameter.')
+
+    if evoked_template is not None and not isinstance(evoked_template, Info):
+        warnings.warn('The "evoked_template" parameter is being deprecated '
+                      'and will be removed in MNE-0.11. '
+                      'Please provide the info parameter instead',
+                      DeprecationWarning)
+        info = evoked_template.info
+
+    if info is not None and not isinstance(info, Info):
+        warnings.warn('The "evoked_template" parameter is being deprecated '
+                      'and will be removed in MNE-0.11. '
+                      'Please provide the info parameter instead',
+                      DeprecationWarning)
+        info = info.info
 
-    # make sure evoked_template contains all channels in fwd
+    # make sure info contains all channels in fwd
     for ch_name in fwd['sol']['row_names']:
-        if ch_name not in evoked_template.ch_names:
+        if ch_name not in info['ch_names']:
             raise ValueError('Channel %s of forward operator not present in '
-                             'evoked_template.' % ch_name)
+                             'info.' % ch_name)
 
     # project the source estimate to the sensor space
     data, times = _apply_forward(fwd, stc, start, stop)
 
-    # store sensor data in an Evoked object using the template
-    evoked = deepcopy(evoked_template)
+    # fill the measurement info
+    sfreq = float(1.0 / stc.tstep)
+    info_out = _fill_measurement_info(info, fwd, sfreq)
 
-    evoked.nave = 1
-    evoked.data = data
-    evoked.times = times
+    evoked = EvokedArray(data, info_out, times[0], nave=1)
 
-    sfreq = float(1.0 / stc.tstep)
+    evoked.times = times
     evoked.first = int(np.round(evoked.times[0] * sfreq))
     evoked.last = evoked.first + evoked.data.shape[1] - 1
 
-    # fill the measurement info
-    evoked.info = _fill_measurement_info(evoked.info, fwd, sfreq)
-
     return evoked
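
A hedged sketch of the new info-based call, with fwd, stc and evoked
assumed in scope (a fixed-orientation forward, a matching SourceEstimate,
and any Evoked acquired on the same system):

    evoked_sim = apply_forward(fwd, stc, evoked.info, start=0, stop=100)
    # the deprecated spelling still works in 0.10, but warns:
    #   apply_forward(fwd, stc, evoked_template=evoked)
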
 
 
 @verbose
-def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
+def apply_forward_raw(fwd, stc, info, start=None, stop=None,
                       verbose=None):
     """Project source space currents to sensor space using a forward operator
 
@@ -1166,19 +1199,19 @@ def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
     pick_channels_forward or pick_types_forward to restrict the solution to a
     subset of channels.
 
-    The function returns a Raw object, which is constructed from raw_template.
-    The raw_template should be from the same MEG system on which the original
-    data was acquired. An exception will be raised if the forward operator
-    contains channels that are not present in the template.
+    The function returns a Raw object, which is constructed from the
+    provided info. The info object should come from the same MEG system on
+    which the original data was acquired. An exception will be raised if
+    the forward operator contains channels that are not present in the info.
 
     Parameters
     ----------
-    forward : dict
+    fwd : dict
         Forward operator to use. Has to be fixed-orientation.
     stc : SourceEstimate
         The source estimate from which the sensor space data is computed.
-    raw_template : Raw object
-        Raw object used as template to generate the output argument.
+    info : Instance of mne.io.meas_info.Info
+        The measurement info.
     start : int, optional
         Index of the first time sample (index, not time in seconds).
     stop : int, optional
@@ -1195,32 +1228,33 @@ def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
     --------
     apply_forward: Compute sensor space data and return an Evoked object.
     """
-
-    # make sure raw_template contains all channels in fwd
+    if isinstance(info, _BaseRaw):
+        warnings.warn('The "Raw_template" parameter is being deprecated '
+                      'and will be removed in MNE-0.11. '
+                      'Please provide info parameter instead',
+                      DeprecationWarning)
+        info = info.info
+
+    # make sure info contains all channels in fwd
     for ch_name in fwd['sol']['row_names']:
-        if ch_name not in raw_template.ch_names:
+        if ch_name not in info['ch_names']:
             raise ValueError('Channel %s of forward operator not present in '
-                             'raw_template.' % ch_name)
+                             'info.' % ch_name)
 
     # project the source estimate to the sensor space
     data, times = _apply_forward(fwd, stc, start, stop)
 
-    # store sensor data in Raw object using the template
-    raw = raw_template.copy()
+    sfreq = 1.0 / stc.tstep
+    info = _fill_measurement_info(info, fwd, sfreq)
+    info['projs'] = []
+    # store sensor data in Raw object using the info
+    raw = RawArray(data, info)
     raw.preload = True
-    raw._data = data
-    raw._times = times
 
-    sfreq = float(1.0 / stc.tstep)
-    raw.first_samp = int(np.round(raw._times[0] * sfreq))
-    raw.last_samp = raw.first_samp + raw._data.shape[1] - 1
-
-    # fill the measurement info
-    raw.info = _fill_measurement_info(raw.info, fwd, sfreq)
-
-    raw.info['projs'] = []
+    raw._first_samps = np.array([int(np.round(times[0] * sfreq))])
+    raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])
     raw._projector = None
-
+    raw._update_times()
     return raw
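
apply_forward_raw gets the same treatment, building its output with
RawArray rather than mutating a copied template; under the same
assumptions as the sketch above:

    raw_sim = apply_forward_raw(fwd, stc, evoked.info)
    data, times = raw_sim[:, :]  # RawArray supports the usual Raw slicing
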
 
 
@@ -1238,6 +1272,10 @@ def restrict_forward_to_stc(fwd, stc):
     -------
     fwd_out : dict
         Restricted forward operator.
+
+    See Also
+    --------
+    restrict_forward_to_label
     """
 
     fwd_out = deepcopy(fwd)
@@ -1256,12 +1294,12 @@ def restrict_forward_to_stc(fwd, stc):
     fwd_out['sol']['ncol'] = len(idx)
 
     for i in range(2):
-        fwd_out['src'][i]['vertno'] = stc.vertno[i]
-        fwd_out['src'][i]['nuse'] = len(stc.vertno[i])
+        fwd_out['src'][i]['vertno'] = stc.vertices[i]
+        fwd_out['src'][i]['nuse'] = len(stc.vertices[i])
         fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
         fwd_out['src'][i]['inuse'].fill(0)
-        fwd_out['src'][i]['inuse'][stc.vertno[i]] = 1
-        fwd_out['src'][i]['use_tris'] = np.array([])
+        fwd_out['src'][i]['inuse'][stc.vertices[i]] = 1
+        fwd_out['src'][i]['use_tris'] = np.array([], int)
         fwd_out['src'][i]['nuse_tri'] = np.array([0])
 
     return fwd_out
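
Usage sketch for the helper cross-referenced above (fwd and stc assumed to
share a source space):

    fwd_small = restrict_forward_to_stc(fwd, stc)
    assert fwd_small['sol']['ncol'] <= fwd['sol']['ncol']
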
@@ -1281,6 +1319,10 @@ def restrict_forward_to_label(fwd, labels):
     -------
     fwd_out : dict
         Restricted forward operator.
+
+    See Also
+    --------
+    restrict_forward_to_stc
     """
 
     if not isinstance(labels, list):
@@ -1294,11 +1336,11 @@ def restrict_forward_to_label(fwd, labels):
     fwd_out['sol']['ncol'] = 0
 
     for i in range(2):
-        fwd_out['src'][i]['vertno'] = np.array([])
+        fwd_out['src'][i]['vertno'] = np.array([], int)
         fwd_out['src'][i]['nuse'] = 0
         fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()
         fwd_out['src'][i]['inuse'].fill(0)
-        fwd_out['src'][i]['use_tris'] = np.array([])
+        fwd_out['src'][i]['use_tris'] = np.array([], int)
         fwd_out['src'][i]['nuse_tri'] = np.array([0])
 
     for label in labels:
@@ -1309,8 +1351,8 @@ def restrict_forward_to_label(fwd, labels):
         else:
             i = 1
             src_sel = np.intersect1d(fwd['src'][1]['vertno'], label.vertices)
-            src_sel = (np.searchsorted(fwd['src'][1]['vertno'], src_sel)
-                       + len(fwd['src'][0]['vertno']))
+            src_sel = (np.searchsorted(fwd['src'][1]['vertno'], src_sel) +
+                       len(fwd['src'][0]['vertno']))
 
         fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],
                                           fwd['source_rr'][src_sel]])
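
And the label-based counterpart, which accepts a single Label or a list of
them (the label file names are illustrative):

    from mne import read_label

    labels = [read_label('Aud-lh.label'), read_label('Aud-rh.label')]
    fwd_aud = restrict_forward_to_label(fwd, labels)
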
@@ -1371,13 +1413,12 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
     bem : str | None
         Name of the BEM to use (e.g., "sample-5120-5120-5120"). If None
         (Default), the MNE default will be used.
-    trans : str | None
-        File name of the trans file. If None, mri must not be None.
-    mri : dict | str | None
-        Either a transformation (usually made using mne_analyze) or an
-        info dict (usually opened using read_trans()), or a filename.
-        If dict, the trans will be saved in a temporary directory. If
-        None, trans must not be None.
+    mri : str | None
+        The name of the trans file in FIF format.
+        If None, trans must not be None.
+    trans : dict | str | None
+        File name of the trans file in text format.
+        If None, mri must not be None.
     eeg : bool
         If True (Default), include EEG computations.
     meg : bool
@@ -1399,12 +1440,16 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
+    See Also
+    --------
+    forward.make_forward_solution
+
     Returns
     -------
     fwd : dict
         The generated forward solution.
     """
-    if not has_command_line_tools():
+    if not has_mne_c():
         raise RuntimeError('mne command line tools could not be found')
 
     # check for file existence
@@ -1541,7 +1586,7 @@ def do_forward_solution(subject, meas, fname=None, src=None, spacing=None,
         logger.info('Running forward solution generation command with '
                     'subjects_dir %s' % subjects_dir)
         run_subprocess(cmd, env=env)
-    except:
+    except Exception:
         raise
     else:
         fwd = read_forward_solution(op.join(path, fname), verbose=False)
@@ -1597,16 +1642,16 @@ def average_forward_solutions(fwds, weights=None):
         check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',
                       'source_rr', 'source_ori', 'surf_ori', 'coord_frame',
                       'mri_head_t', 'nsource']
-        if not all([key in fwd for key in check_keys]):
+        if not all(key in fwd for key in check_keys):
             raise KeyError('forward solution dict does not have all standard '
                            'entries, cannot compute average.')
 
     # check forward solution compatibility
-    if any([fwd['sol'][k] != fwds[0]['sol'][k]
-            for fwd in fwds[1:] for k in ['nrow', 'ncol']]):
+    if any(fwd['sol'][k] != fwds[0]['sol'][k]
+           for fwd in fwds[1:] for k in ['nrow', 'ncol']):
         raise ValueError('Forward solutions have incompatible dimensions')
-    if any([fwd[k] != fwds[0][k] for fwd in fwds[1:]
-            for k in ['source_ori', 'surf_ori', 'coord_frame']]):
+    if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]
+           for k in ['source_ori', 'surf_ori', 'coord_frame']):
         raise ValueError('Forward solutions have incompatible orientations')
 
     # actually average them (solutions and gradients)
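
Averaging sketch for the checks above; when weights is omitted, the
solutions are weighted uniformly:

    fwd_ave = average_forward_solutions([fwd_a, fwd_b], weights=[0.25, 0.75])
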
diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py
index 7422e38..43fbc35 100644
--- a/mne/forward/tests/test_field_interpolation.py
+++ b/mne/forward/tests/test_field_interpolation.py
@@ -1,27 +1,32 @@
 import numpy as np
 from os import path as op
 from numpy.polynomial import legendre
-from numpy.testing.utils import assert_allclose, assert_array_equal
+from numpy.testing.utils import (assert_allclose, assert_array_equal,
+                                 assert_array_almost_equal)
 from nose.tools import assert_raises, assert_true
 
 from mne.forward import _make_surface_mapping, make_field_map
-from mne.surface import get_meg_helmet_surf, get_head_surf
-from mne.datasets import sample
 from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
                                     _get_legen_table,
                                     _get_legen_lut_fast,
-                                    _get_legen_lut_accurate)
-from mne import pick_types_evoked, read_evokeds
+                                    _get_legen_lut_accurate,
+                                    _do_cross_dots)
+from mne.forward._make_forward import _create_meg_coils
+from mne.forward._field_interpolation import _setup_dots
+from mne.surface import get_meg_helmet_surf, get_head_surf
+from mne.datasets import testing
+from mne import read_evokeds
 from mne.fixes import partial
 from mne.externals.six.moves import zip
+from mne.utils import run_tests_if_main, slow_test
 
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 evoked_fname = op.join(base_dir, 'test-ave.fif')
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 trans_fname = op.join(data_path, 'MEG', 'sample',
-                      'sample_audvis_raw-trans.fif')
+                      'sample_audvis_trunc-trans.fif')
 subjects_dir = op.join(data_path, 'subjects')
 
 
@@ -38,7 +43,7 @@ def test_legendre_val():
     # Table approximation
     for fun, nc in zip([_get_legen_lut_fast, _get_legen_lut_accurate],
                        [100, 50]):
-        lut, n_fact = _get_legen_table('eeg', n_coeff=nc)
+        lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
         vals_i = fun(xs, lut)
         # Need a "1:" here because we omit the first coefficient in our table!
         assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
@@ -54,8 +59,8 @@ def test_legendre_val():
         # compare to numpy
         n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
         coeffs = np.zeros((n_terms,) + beta.shape)
-        coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0)
-                      * (2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
+        coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
+                      (2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
         # can't use tensor=False here b/c it isn't in old numpy
         c2 = np.empty((20, 30))
         for ci1 in range(20):
@@ -67,10 +72,10 @@ def test_legendre_val():
     # compare fast and slow for MEG
     ctheta = np.random.rand(20 * 30) * 2.0 - 1.0
     beta = np.random.rand(20 * 30) * 0.8
-    lut, n_fact = _get_legen_table('meg', n_coeff=50)
+    lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
     fun = partial(_get_legen_lut_fast, lut=lut)
     coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
-    lut, n_fact = _get_legen_table('meg', n_coeff=100)
+    lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
     fun = partial(_get_legen_lut_accurate, lut=lut)
     coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
 
@@ -79,18 +84,17 @@ def test_legendre_table():
     """Test Legendre table calculation
     """
     # double-check our table generation
-    n_do = 10
+    n = 10
     for ch_type in ['eeg', 'meg']:
-        lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=50)
-        lut1 = lut1[:, :n_do - 1].copy()
-        n_fact1 = n_fact1[:n_do - 1].copy()
-        lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n_do,
-                                         force_calc=True)
+        lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
+        lut1 = lut1[:, :n - 1].copy()
+        n_fact1 = n_fact1[:n - 1].copy()
+        lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
         assert_allclose(lut1, lut2)
         assert_allclose(n_fact1, n_fact2)
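
For reference, the numpy ground truth these table tests lean on can be
reproduced with public API alone (a minimal sketch):

    import numpy as np
    from numpy.polynomial import legendre

    xs = np.linspace(-1., 1., 5)
    # rows of the identity pick out P_0 .. P_3 as coefficient vectors
    vals = np.array([legendre.legval(xs, c) for c in np.eye(4)])
    assert np.allclose(vals[0], 1.) and np.allclose(vals[1], xs)
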
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_make_field_map_eeg():
     """Test interpolation of EEG field onto head
     """
@@ -100,23 +104,25 @@ def test_make_field_map_eeg():
     # we must have trans if surface is in MRI coords
     assert_raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
 
-    evoked = pick_types_evoked(evoked, meg=False, eeg=True)
-    fmd = make_field_map(evoked, trans_fname=trans_fname,
+    evoked.pick_types(meg=False, eeg=True)
+    fmd = make_field_map(evoked, trans_fname,
                          subject='sample', subjects_dir=subjects_dir)
 
     # trans is necessary for EEG only
-    assert_raises(RuntimeError, make_field_map, evoked, trans_fname=None,
+    assert_raises(RuntimeError, make_field_map, evoked, None,
                   subject='sample', subjects_dir=subjects_dir)
 
-    fmd = make_field_map(evoked, trans_fname=trans_fname,
+    fmd = make_field_map(evoked, trans_fname,
                          subject='sample', subjects_dir=subjects_dir)
     assert_true(len(fmd) == 1)
-    assert_array_equal(fmd[0]['data'].shape, (2562, 59))  # maps data onto surf
+    assert_array_equal(fmd[0]['data'].shape, (642, 59))  # maps data onto surf
     assert_true(len(fmd[0]['ch_names']) == 59)
 
 
+@testing.requires_testing_data
+@slow_test
 def test_make_field_map_meg():
-    """Test interpolation of MEG field onto helmet
+    """Test interpolation of MEG field onto helmet | head
     """
     evoked = read_evokeds(evoked_fname, condition='Left Auditory')
     info = evoked.info
@@ -129,7 +135,7 @@ def test_make_field_map_meg():
     assert_raises(ValueError, _make_surface_mapping, info, surf, 'meg',
                   mode='foo')
     # no picks
-    evoked_eeg = pick_types_evoked(evoked, meg=False, eeg=True)
+    evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
     assert_raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
                   surf, 'meg')
     # bad surface def
@@ -143,11 +149,75 @@ def test_make_field_map_meg():
     surf['coord_frame'] = cf
 
     # now do it with make_field_map
-    evoked = pick_types_evoked(evoked, meg=True, eeg=False)
-    fmd = make_field_map(evoked, trans_fname=None,
+    evoked.pick_types(meg=True, eeg=False)
+    fmd = make_field_map(evoked, None,
                          subject='sample', subjects_dir=subjects_dir)
     assert_true(len(fmd) == 1)
     assert_array_equal(fmd[0]['data'].shape, (304, 106))  # maps data onto surf
     assert_true(len(fmd[0]['ch_names']) == 106)
 
     assert_raises(ValueError, make_field_map, evoked, ch_type='foobar')
+
+    # now test the make_field_map on head surf for MEG
+    evoked.pick_types(meg=True, eeg=False)
+    fmd = make_field_map(evoked, trans_fname, meg_surf='head',
+                         subject='sample', subjects_dir=subjects_dir)
+    assert_true(len(fmd) == 1)
+    assert_array_equal(fmd[0]['data'].shape, (642, 106))  # maps data onto surf
+    assert_true(len(fmd[0]['ch_names']) == 106)
+
+    assert_raises(ValueError, make_field_map, evoked, meg_surf='foobar',
+                  subjects_dir=subjects_dir, trans=trans_fname)
+
+
+def _setup_args(info):
+    """Helper to test_as_meg_type_evoked."""
+    coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
+    my_origin, int_rad, noise, lut_fun, n_fact = _setup_dots('fast',
+                                                             coils,
+                                                             'meg')
+    args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
+                     ch_type='meg', lut=lut_fun, n_fact=n_fact)
+    return args_dict
+
+
+@testing.requires_testing_data
+def test_as_meg_type_evoked():
+    """Test interpolation of data on to virtual channels."""
+
+    # validation tests
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory')
+    assert_raises(ValueError, evoked.as_type, 'meg')
+    assert_raises(ValueError, evoked.copy().pick_types(meg='grad').as_type,
+                  'meg')
+
+    # channel names
+    ch_names = evoked.info['ch_names']
+    virt_evoked = evoked.pick_channels(ch_names=ch_names[:10:1],
+                                       copy=True).as_type('mag')
+    assert_true(all('_virtual' in ch for ch in virt_evoked.info['ch_names']))
+
+    # pick from and to channels
+    evoked_from = evoked.pick_channels(ch_names=ch_names[2:10:3], copy=True)
+    evoked_to = evoked.pick_channels(ch_names=ch_names[0:10:3], copy=True)
+
+    info_from, info_to = evoked_from.info, evoked_to.info
+
+    # set up things
+    args1, args2 = _setup_args(info_from), _setup_args(info_to)
+    args1.update(coils2=args2['coils1'])
+    args2.update(coils2=args1['coils1'])
+
+    # test cross dots
+    cross_dots1 = _do_cross_dots(**args1)
+    cross_dots2 = _do_cross_dots(**args2)
+
+    assert_array_almost_equal(cross_dots1, cross_dots2.T)
+
+    # correlation test
+    evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
+    data1 = evoked.pick_types(meg='grad').data.ravel()
+    data2 = evoked.as_type('grad').data.ravel()
+    assert_true(np.corrcoef(data1, data2)[0, 1] > 0.95)
+
+run_tests_if_main()
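
The new as_type test above exercises interpolation onto virtual sensors; a
hedged minimal use (test-ave.fif is the small file shipped in
mne/io/tests/data):

    from mne import read_evokeds

    evoked = read_evokeds('test-ave.fif', condition='Left Auditory')
    # remap gradiometer data onto magnetometer geometry; the resulting
    # channel names carry a '_virtual' suffix, as asserted above
    virt = evoked.copy().pick_types(meg='grad').as_type('mag')
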
diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py
index 3ecf7a6..eee1cdb 100644
--- a/mne/forward/tests/test_forward.py
+++ b/mne/forward/tests/test_forward.py
@@ -1,41 +1,39 @@
 import os
 import os.path as op
 import warnings
+import gc
 
 from nose.tools import assert_true, assert_raises
 import numpy as np
 from numpy.testing import (assert_array_almost_equal, assert_equal,
                            assert_array_equal, assert_allclose)
 
-from mne.datasets import sample
-from mne.io import Raw
+from mne.datasets import testing
 from mne import (read_forward_solution, apply_forward, apply_forward_raw,
                  average_forward_solutions, write_forward_solution,
                  convert_forward_solution)
 from mne import SourceEstimate, pick_types_forward, read_evokeds
 from mne.label import read_label
-from mne.utils import requires_mne, run_subprocess, _TempDir
+from mne.utils import (requires_mne, run_subprocess, _TempDir,
+                       run_tests_if_main, slow_test)
 from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
                          Forward)
 
-data_path = sample.data_path(download=False)
-fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif')
+data_path = testing.data_path(download=False)
 fname_meeg = op.join(data_path, 'MEG', 'sample',
-                     'sample_audvis-meg-eeg-oct-6-fwd.fif')
+                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
+                          'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
 
 fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
 
 fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                        'data', 'test-ave.fif')
-fname_mri = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
+fname_mri = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-trans.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
-fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
-temp_dir = _TempDir()
-# make a file that exists with some data in it
-existing_file = op.join(temp_dir, 'test.fif')
-with open(existing_file, 'w') as fid:
-    fid.write('aoeu')
+fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
 
 
 def compare_forwards(f1, f2):
@@ -44,100 +42,106 @@ def compare_forwards(f1, f2):
     assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
     assert_allclose(f1['source_nn'], f2['source_nn'])
     if f1['sol_grad'] is not None:
+        assert_true(f2['sol_grad'] is not None)
         assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
         assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
     else:
-        assert_equal(f2['sol_grad'], None)
+        assert_true(f2['sol_grad'] is None)
     assert_equal(f1['source_ori'], f2['source_ori'])
     assert_equal(f1['surf_ori'], f2['surf_ori'])
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_convert_forward():
     """Test converting forward solution between different representations
     """
-    fwd = read_forward_solution(fname_meeg)
-    print(fwd)  # __repr__
+    fwd = read_forward_solution(fname_meeg_grad)
+    assert_true(repr(fwd))
     assert_true(isinstance(fwd, Forward))
     # look at surface orientation
     fwd_surf = convert_forward_solution(fwd, surf_ori=True)
-    fwd_surf_io = read_forward_solution(fname_meeg, surf_ori=True)
+    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
     compare_forwards(fwd_surf, fwd_surf_io)
+    del fwd_surf_io
+    gc.collect()
     # go back
     fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
-    print(fwd_new)
-    assert_true(isinstance(fwd, Forward))
+    assert_true(repr(fwd_new))
+    assert_true(isinstance(fwd_new, Forward))
     compare_forwards(fwd, fwd_new)
     # now go to fixed
     fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                          force_fixed=True)
-    print(fwd_fixed)
+    del fwd_surf
+    gc.collect()
+    assert_true(repr(fwd_fixed))
     assert_true(isinstance(fwd_fixed, Forward))
-    fwd_fixed_io = read_forward_solution(fname_meeg, surf_ori=False,
+    fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
                                          force_fixed=True)
     compare_forwards(fwd_fixed, fwd_fixed_io)
+    del fwd_fixed_io
+    gc.collect()
     # now go back to cartesian (original condition)
     fwd_new = convert_forward_solution(fwd_fixed)
-    print(fwd_new)
+    assert_true(repr(fwd_new))
     assert_true(isinstance(fwd_new, Forward))
     compare_forwards(fwd, fwd_new)
+    del fwd, fwd_new, fwd_fixed
+    gc.collect()
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_io_forward():
     """Test IO for forward solutions
     """
-    # test M/EEG
-    fwd_meeg = read_forward_solution(fname_meeg)
-    assert_true(isinstance(fwd_meeg, Forward))
-    leadfield = fwd_meeg['sol']['data']
-    assert_equal(leadfield.shape, (366, 22494))
-    assert_equal(len(fwd_meeg['sol']['row_names']), 366)
-    fname_temp = op.join(temp_dir, 'test-fwd.fif')
-    write_forward_solution(fname_temp, fwd_meeg, overwrite=True)
-
-    fwd_meeg = read_forward_solution(fname_temp)
-    assert_allclose(leadfield, fwd_meeg['sol']['data'])
-    assert_equal(len(fwd_meeg['sol']['row_names']), 366)
-
-    # now do extensive tests with MEG
-    fwd = read_forward_solution(fname)
-    fwd = read_forward_solution(fname, surf_ori=True)
+    temp_dir = _TempDir()
+    # do extensive tests with MEEG + grad
+    n_channels, n_src = 366, 108
+    fwd = read_forward_solution(fname_meeg_grad)
+    assert_true(isinstance(fwd, Forward))
+    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
     leadfield = fwd['sol']['data']
-    assert_equal(leadfield.shape, (306, 22494))
-    assert_equal(len(fwd['sol']['row_names']), 306)
+    assert_equal(leadfield.shape, (n_channels, n_src))
+    assert_equal(len(fwd['sol']['row_names']), n_channels)
     fname_temp = op.join(temp_dir, 'test-fwd.fif')
     write_forward_solution(fname_temp, fwd, overwrite=True)
 
-    fwd = read_forward_solution(fname, surf_ori=True)
+    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
     fwd_read = read_forward_solution(fname_temp, surf_ori=True)
     leadfield = fwd_read['sol']['data']
-    assert_equal(leadfield.shape, (306, 22494))
-    assert_equal(len(fwd_read['sol']['row_names']), 306)
-    assert_equal(len(fwd_read['info']['chs']), 306)
+    assert_equal(leadfield.shape, (n_channels, n_src))
+    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
+    assert_equal(len(fwd_read['info']['chs']), n_channels)
     assert_true('dev_head_t' in fwd_read['info'])
     assert_true('mri_head_t' in fwd_read)
     assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
 
-    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
     leadfield = fwd['sol']['data']
-    assert_equal(leadfield.shape, (306, 22494 / 3))
-    assert_equal(len(fwd['sol']['row_names']), 306)
-    assert_equal(len(fwd['info']['chs']), 306)
+    assert_equal(leadfield.shape, (n_channels, n_src / 3))
+    assert_equal(len(fwd['sol']['row_names']), n_channels)
+    assert_equal(len(fwd['info']['chs']), n_channels)
     assert_true('dev_head_t' in fwd['info'])
     assert_true('mri_head_t' in fwd)
     assert_true(fwd['surf_ori'])
 
     # test warnings on bad filenames
+    fwd = read_forward_solution(fname_meeg_grad)
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
-        write_forward_solution(fwd_badname, fwd_meeg)
+        write_forward_solution(fwd_badname, fwd)
         read_forward_solution(fwd_badname)
     assert_true(len(w) == 2)
 
+    fwd = read_forward_solution(fname_meeg)
+    write_forward_solution(fname_temp, fwd, overwrite=True)
+    fwd_read = read_forward_solution(fname_temp)
+    compare_forwards(fwd, fwd_read)
+
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_apply_forward():
     """Test projection of source space data to sensor space
     """
@@ -147,7 +151,7 @@ def test_apply_forward():
     sfreq = 10.0
     t_start = 0.123
 
-    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True)
     assert_true(isinstance(fwd, Forward))
 
@@ -160,7 +164,8 @@ def test_apply_forward():
     # Evoked
     with warnings.catch_warnings(record=True) as w:
         evoked = read_evokeds(fname_evoked, condition=0)
-        evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
+        evoked.pick_types(meg=True)
+        evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
         assert_equal(len(w), 2)
         data = evoked.data
         times = evoked.times
@@ -172,18 +177,20 @@ def test_apply_forward():
         assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
 
         # Raw
-        raw = Raw(fname_raw)
-        raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
+        raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start,
+                                     stop=stop)
         data, times = raw_proj[:, :]
 
         # do some tests
         assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
         assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
-        assert_array_almost_equal(times[0], t_start)
-        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
+        atol = 1. / sfreq
+        assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
+        assert_allclose(raw_proj.last_samp / sfreq,
+                        t_start + (n_times - 1) / sfreq, atol=atol)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_restrict_forward_to_stc():
     """Test restriction of source space to source SourceEstimate
     """
@@ -193,7 +200,7 @@ def test_restrict_forward_to_stc():
     sfreq = 10.0
     t_start = 0.123
 
-    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True)
 
     vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
@@ -209,7 +216,7 @@ def test_restrict_forward_to_stc():
     assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
     assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
 
-    fwd = read_forward_solution(fname, force_fixed=False)
+    fwd = read_forward_solution(fname_meeg, force_fixed=False)
     fwd = pick_types_forward(fwd, meg=True)
 
     vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
@@ -225,11 +232,11 @@ def test_restrict_forward_to_stc():
     assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_restrict_forward_to_label():
     """Test restriction of source space to label
     """
-    fwd = read_forward_solution(fname, force_fixed=True)
+    fwd = read_forward_solution(fname_meeg, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True)
 
     label_path = op.join(data_path, 'MEG', 'sample', 'labels')
@@ -243,8 +250,8 @@ def test_restrict_forward_to_label():
     src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
 
     src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
-    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
-                  + len(fwd['src'][0]['vertno']))
+    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
+                  len(fwd['src'][0]['vertno']))
 
     assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
     assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
@@ -252,7 +259,7 @@ def test_restrict_forward_to_label():
     assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
     assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
 
-    fwd = read_forward_solution(fname, force_fixed=False)
+    fwd = read_forward_solution(fname_meeg, force_fixed=False)
     fwd = pick_types_forward(fwd, meg=True)
 
     label_path = op.join(data_path, 'MEG', 'sample', 'labels')
@@ -266,8 +273,8 @@ def test_restrict_forward_to_label():
     src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
 
     src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
-    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
-                  + len(fwd['src'][0]['vertno']))
+    src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +
+                  len(fwd['src'][0]['vertno']))
 
     assert_equal(fwd_out['sol']['ncol'],
                  3 * (len(src_sel_lh) + len(src_sel_rh)))
@@ -277,12 +284,13 @@ def test_restrict_forward_to_label():
     assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_mne
 def test_average_forward_solution():
     """Test averaging forward solutions
     """
-    fwd = read_forward_solution(fname)
+    temp_dir = _TempDir()
+    fwd = read_forward_solution(fname_meeg)
     # input not a list
     assert_raises(TypeError, average_forward_solutions, 1)
     # list is too short
@@ -305,7 +313,7 @@ def test_average_forward_solution():
     fwd_copy['sol']['data'] *= 0.5
     fname_copy = op.join(temp_dir, 'copy-fwd.fif')
     write_forward_solution(fname_copy, fwd_copy, overwrite=True)
-    cmd = ('mne_average_forward_solutions', '--fwd', fname, '--fwd',
+    cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
            fname_copy, '--out', fname_copy)
     run_subprocess(cmd)
 
@@ -314,3 +322,10 @@ def test_average_forward_solution():
     assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
     # fwd_ave_mne = read_forward_solution(fname_copy)
     # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
+
+    # with gradient
+    fwd = read_forward_solution(fname_meeg_grad)
+    fwd_ave = average_forward_solutions([fwd, fwd])
+    compare_forwards(fwd, fwd_ave)
+
+run_tests_if_main()
diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py
index 4fb018e..dba5d58 100644
--- a/mne/forward/tests/test_make_forward.py
+++ b/mne/forward/tests/test_make_forward.py
@@ -6,41 +6,46 @@ from subprocess import CalledProcessError
 import warnings
 
 from nose.tools import assert_raises, assert_true
+import numpy as np
 from numpy.testing import (assert_equal, assert_allclose)
 
-from mne.datasets import sample
-from mne.io import Raw
-from mne.io import read_raw_kit
-from mne.io import read_raw_bti
+from mne.datasets import testing
+from mne.io import Raw, read_raw_kit, read_raw_bti, read_info
+from mne.io.constants import FIFF
 from mne import (read_forward_solution, make_forward_solution,
-                 do_forward_solution, setup_source_space, read_trans,
-                 convert_forward_solution)
-from mne.utils import requires_mne, _TempDir
-from mne.tests.test_source_space import _compare_source_spaces
+                 do_forward_solution, read_trans,
+                 convert_forward_solution, setup_volume_source_space,
+                 read_source_spaces, make_sphere_model,
+                 pick_types_forward, pick_info, pick_types, Transform)
+from mne.utils import (requires_mne, requires_nibabel, _TempDir,
+                       run_tests_if_main, slow_test, run_subprocess)
+from mne.forward._make_forward import _create_meg_coils
+from mne.forward._compute_forward import _magnetic_dipole_field_vec
 from mne.forward import Forward
+from mne.source_space import (get_volume_labels_from_aseg,
+                              _compare_source_spaces, setup_source_space)
 
-data_path = sample.data_path(download=False)
-fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif')
+data_path = testing.data_path(download=False)
 fname_meeg = op.join(data_path, 'MEG', 'sample',
-                     'sample_audvis-meg-eeg-oct-6-fwd.fif')
-
+                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
 fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
-
 fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                        'data', 'test-ave.fif')
-fname_mri = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
-temp_dir = _TempDir()
-
-# make a file that exists with some data in it
-existing_file = op.join(temp_dir, 'test.fif')
-with open(existing_file, 'w') as fid:
-    fid.write('aoeu')
+fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
+fname_bem = op.join(subjects_dir, 'sample', 'bem',
+                    'sample-1280-1280-1280-bem-sol.fif')
+fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
+                        'sample-1280-bem-sol.fif')
 
 
 def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
-                      meg_rtol=1e-4, meg_atol=1e-9):
+                      meg_rtol=1e-4, meg_atol=1e-9,
+                      eeg_rtol=1e-3, eeg_atol=1e-3):
     """Helper to test forwards"""
     # check source spaces
     assert_equal(len(fwd['src']), len(fwd_py['src']))
@@ -63,32 +68,48 @@ def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
         assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
 
         # check MEG
-        print('check MEG')
         assert_allclose(fwd['sol']['data'][:306],
                         fwd_py['sol']['data'][:306],
-                        rtol=meg_rtol, atol=meg_atol)
+                        rtol=meg_rtol, atol=meg_atol,
+                        err_msg='MEG mismatch')
         # check EEG
         if fwd['sol']['data'].shape[0] > 306:
-            print('check EEG')
             assert_allclose(fwd['sol']['data'][306:],
                             fwd_py['sol']['data'][306:],
-                            rtol=1e-3, atol=1e-3)
+                            rtol=eeg_rtol, atol=eeg_atol,
+                            err_msg='EEG mismatch')
 
 
-@sample.requires_sample_data
+def test_magnetic_dipole():
+    """Test basic magnetic dipole forward calculation
+    """
+    trans = Transform('mri', 'head', np.eye(4))
+    info = read_info(fname_raw)
+    picks = pick_types(info, meg=True, eeg=False, exclude=[])
+    info = pick_info(info, picks[:12])
+    coils = _create_meg_coils(info['chs'], 'normal', trans)
+    # magnetic dipole at device origin
+    r0 = np.array([0., 13., -6.])
+    for ch, coil in zip(info['chs'], coils):
+        rr = (ch['loc'][:3] + r0) / 2.
+        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
+        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
+        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # mag vs grad
+        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
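
Those expected ratios follow from simple decay arithmetic: a magnetic
dipole field falls off as 1/r**3, so halving the source-to-sensor distance
(the midpoint rr above) scales a magnetometer reading by 2**3 = 8, while a
planar gradiometer senses the spatial derivative (roughly 1/r**4) and
scales by 2**4 = 16. On Neuromag systems, channel names ending in '1' are
the magnetometers.
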
+
+
+@testing.requires_testing_data
 @requires_mne
 def test_make_forward_solution_kit():
     """Test making fwd using KIT, BTI, and CTF (compensated) files
     """
-    fname_bem = op.join(subjects_dir, 'sample', 'bem',
-                        'sample-5120-bem-sol.fif')
     kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
                       'tests', 'data')
     sqd_path = op.join(kit_dir, 'test.sqd')
     mrk_path = op.join(kit_dir, 'test_mrk.sqd')
     elp_path = op.join(kit_dir, 'test_elp.txt')
     hsp_path = op.join(kit_dir, 'test_hsp.txt')
-    mri_path = op.join(kit_dir, 'trans-sample.fif')
+    trans_path = op.join(kit_dir, 'trans-sample.fif')
     fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
 
     bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
@@ -101,93 +122,124 @@ def test_make_forward_solution_kit():
     fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                             'data', 'test_ctf_comp_raw.fif')
 
-    # first set up a testing source space
-    fname_src = op.join(temp_dir, 'oct2-src.fif')
-    src = setup_source_space('sample', fname_src, 'oct2',
-                             subjects_dir=subjects_dir)
+    # first set up a small testing source space
+    temp_dir = _TempDir()
+    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
+    src = setup_source_space('sample', fname_src_small, 'oct2',
+                             subjects_dir=subjects_dir, add_dist=False)
+    n_src = 108  # this is the resulting # of verts in fwd
 
     # first use mne-C: convert file, make forward solution
-    fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src,
-                              mindist=0.0, bem=fname_bem, mri=mri_path,
+    fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
+                              bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
     assert_true(isinstance(fwd, Forward))
 
     # now let's use python with the same raw file
-    fwd_py = make_forward_solution(fname_kit_raw, mindist=0.0,
-                                   src=src, eeg=False, meg=True,
-                                   bem=fname_bem, mri=mri_path)
-    _compare_forwards(fwd, fwd_py, 157, 108)
+    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
+                                   fname_bem_meg, eeg=False, meg=True)
+    _compare_forwards(fwd, fwd_py, 157, n_src)
     assert_true(isinstance(fwd_py, Forward))
 
     # now let's use mne-python all the way
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
     # without ignore_ref=True, this should throw an error:
     assert_raises(NotImplementedError, make_forward_solution, raw_py.info,
-                  mindist=0.0, src=src, eeg=False, meg=True,
-                  bem=fname_bem, mri=mri_path)
-    fwd_py = make_forward_solution(raw_py.info, mindist=0.0,
-                                   src=src, eeg=False, meg=True,
-                                   bem=fname_bem, mri=mri_path,
+                  src=src, eeg=False, meg=True,
+                  bem=fname_bem_meg, trans=trans_path)
+
+    # check that asking for eeg channels (even if they don't exist) is handled
+    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
+                                                      eeg=False))
+    fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
+                                   bem=fname_bem_meg, trans=trans_path,
                                    ignore_ref=True)
-    _compare_forwards(fwd, fwd_py, 157, 108,
+    _compare_forwards(fwd, fwd_py, 157, n_src,
                       meg_rtol=1e-3, meg_atol=1e-7)
 
     # BTI python end-to-end versus C
-    fwd = do_forward_solution('sample', fname_bti_raw, src=fname_src,
-                              mindist=0.0, bem=fname_bem, mri=mri_path,
+    fwd = do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
+                              bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
     raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs)
-    fwd_py = make_forward_solution(raw_py.info, mindist=0.0,
-                                   src=src, eeg=False, meg=True,
-                                   bem=fname_bem, mri=mri_path)
-    _compare_forwards(fwd, fwd_py, 248, 108)
+    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
+                                   bem=fname_bem_meg, trans=trans_path)
+    _compare_forwards(fwd, fwd_py, 248, n_src)
 
     # now let's test CTF w/compensation
-    fwd_py = make_forward_solution(fname_ctf_raw, mindist=0.0,
-                                   src=src, eeg=False, meg=True,
-                                   bem=fname_bem, mri=fname_mri)
+    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
+                                   fname_bem_meg, eeg=False, meg=True)
 
-    fwd = do_forward_solution('sample', fname_ctf_raw, src=fname_src,
-                              mindist=0.0, bem=fname_bem, mri=fname_mri,
+    fwd = do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
+                              src=fname_src_small, bem=fname_bem_meg,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
-    _compare_forwards(fwd, fwd_py, 274, 108)
+    _compare_forwards(fwd, fwd_py, 274, n_src)
 
     # CTF with compensation changed in python
     ctf_raw = Raw(fname_ctf_raw, compensation=2)
 
-    fwd_py = make_forward_solution(ctf_raw.info, mindist=0.0,
-                                   src=src, eeg=False, meg=True,
-                                   bem=fname_bem, mri=fname_mri)
+    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
+                                   fname_bem_meg, eeg=False, meg=True)
     with warnings.catch_warnings(record=True):
-        fwd = do_forward_solution('sample', ctf_raw, src=fname_src,
-                                  mindist=0.0, bem=fname_bem, mri=fname_mri,
+        fwd = do_forward_solution('sample', ctf_raw, mri=fname_trans,
+                                  src=fname_src_small, bem=fname_bem_meg,
                                   eeg=False, meg=True,
                                   subjects_dir=subjects_dir)
-    _compare_forwards(fwd, fwd_py, 274, 108)
+    _compare_forwards(fwd, fwd_py, 274, n_src)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_make_forward_solution():
     """Test making M-EEG forward solution from python
     """
-    fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
-    fname_bem = op.join(subjects_dir, 'sample', 'bem',
-                        'sample-5120-5120-5120-bem-sol.fif')
-    fwd_py = make_forward_solution(fname_raw, mindist=5.0,
-                                   src=fname_src, eeg=True, meg=True,
-                                   bem=fname_bem, mri=fname_mri)
+    fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
+                                   fname_bem, mindist=5.0, eeg=True, meg=True)
     assert_true(isinstance(fwd_py, Forward))
     fwd = read_forward_solution(fname_meeg)
     assert_true(isinstance(fwd, Forward))
-    _compare_forwards(fwd, fwd_py, 366, 22494)
+    _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
+@requires_mne
+def test_make_forward_solution_sphere():
+    """Test making a forward solution with a sphere model"""
+    temp_dir = _TempDir()
+    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
+    src = setup_source_space('sample', fname_src_small, 'oct2',
+                             subjects_dir=subjects_dir, add_dist=False)
+    out_name = op.join(temp_dir, 'tmp-fwd.fif')
+    run_subprocess(['mne_forward_solution', '--meg', '--eeg',
+                    '--meas', fname_raw, '--src', fname_src_small,
+                    '--mri', fname_trans, '--fwd', out_name])
+    fwd = read_forward_solution(out_name)
+    sphere = make_sphere_model(verbose=True)
+    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
+                                   meg=True, eeg=True, verbose=True)
+    _compare_forwards(fwd, fwd_py, 366, 108,
+                      meg_rtol=5e-1, meg_atol=1e-6,
+                      eeg_rtol=5e-1, eeg_atol=5e-1)
+    # Since the above is pretty lax, let's check a different way
+    for meg, eeg in zip([True, False], [False, True]):
+        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
+        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
+        assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
+                                    fwd_py_['sol']['data'].ravel())[0, 1],
+                        1.0, rtol=1e-3)
+
+
+@testing.requires_testing_data
 @requires_mne
 def test_do_forward_solution():
     """Test wrapping forward solution from python
     """
-    mri = read_trans(fname_mri)
+    temp_dir = _TempDir()
+    existing_file = op.join(temp_dir, 'test.fif')
+    with open(existing_file, 'w') as fid:
+        fid.write('aoeu')
+
+    mri = read_trans(fname_trans)
     fname_fake = op.join(temp_dir, 'no_have.fif')
 
     # ## Error checks
@@ -225,25 +277,80 @@ def test_do_forward_solution():
                   mri=mri, eeg=False, meg=False, subjects_dir=subjects_dir)
     # mindist as non-integer
     assert_raises(TypeError, do_forward_solution, 'sample', fname_raw,
-                  mri=fname_mri, mindist=dict(), subjects_dir=subjects_dir)
+                  mri=fname_trans, mindist=dict(), subjects_dir=subjects_dir)
     # mindist as string but not 'all'
     assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
-                  mri=fname_mri, eeg=False, mindist='yall',
+                  mri=fname_trans, eeg=False, mindist='yall',
                   subjects_dir=subjects_dir)
     # src, spacing, and bem as non-str
     assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
-                  mri=fname_mri, src=1, subjects_dir=subjects_dir)
+                  mri=fname_trans, src=1, subjects_dir=subjects_dir)
     assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
-                  mri=fname_mri, spacing=1, subjects_dir=subjects_dir)
+                  mri=fname_trans, spacing=1, subjects_dir=subjects_dir)
     assert_raises(ValueError, do_forward_solution, 'sample', fname_raw,
-                  mri=fname_mri, bem=1, subjects_dir=subjects_dir)
+                  mri=fname_trans, bem=1, subjects_dir=subjects_dir)
     # no overwrite flag
     assert_raises(IOError, do_forward_solution, 'sample', fname_raw,
-                  existing_file, mri=fname_mri, subjects_dir=subjects_dir)
+                  existing_file, mri=fname_trans, subjects_dir=subjects_dir)
     # let's catch an MNE error, this time about trans being wrong
     assert_raises(CalledProcessError, do_forward_solution, 'sample',
-                  fname_raw, existing_file, trans=fname_mri, overwrite=True,
+                  fname_raw, existing_file, trans=fname_trans, overwrite=True,
                   spacing='oct6', subjects_dir=subjects_dir)
 
     # No need to actually calculate and check here, since it's effectively
     # done in previous tests.
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_nibabel(False)
+def test_forward_mixed_source_space():
+    """Test making the forward solution for a mixed source space
+    """
+    temp_dir = _TempDir()
+    # get the surface source space
+    surf = read_source_spaces(fname_src)
+
+    # setup two volume source spaces
+    label_names = get_volume_labels_from_aseg(fname_aseg)
+    vol_labels = [label_names[int(np.random.rand() * len(label_names))]
+                  for _ in range(2)]
+    vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
+                                     mri=fname_aseg,
+                                     volume_label=vol_labels[0],
+                                     add_interpolator=False)
+    vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
+                                     mri=fname_aseg,
+                                     volume_label=vol_labels[1],
+                                     add_interpolator=False)
+
+    # merge surfaces and volume
+    src = surf + vol1 + vol2
+
+    # calculate forward solution
+    fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem, None)
+    assert_true(repr(fwd))
+
+    # extract source spaces
+    src_from_fwd = fwd['src']
+
+    # get the coordinate frame of each source space
+    coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
+
+    # assert that all source spaces are in head coordinates
+    assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
+
+    # run tests for SourceSpaces.export_volume
+    fname_img = op.join(temp_dir, 'temp-image.mgz')
+
+    # head coordinates and mri_resolution, but no trans file
+    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
+                  mri_resolution=True, trans=None)
+
+    # head coordinates and mri_resolution, but wrong trans file
+    vox_mri_t = vol1[0]['vox_mri_t']
+    assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
+                  mri_resolution=True, trans=vox_mri_t)
+
+
+run_tests_if_main()
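
The tests above exercise the rewritten make_forward_solution() call order:
measurement info (or a raw file name) first, then trans, source space, and
BEM. A minimal usage sketch, with hypothetical file names:

    import mne

    # any raw FIF, -trans.fif, source-space, and BEM-solution files will do
    fwd = mne.make_forward_solution('sample_audvis_raw.fif',
                                    'sample-trans.fif',
                                    'sample-oct-6-src.fif',
                                    'sample-5120-bem-sol.fif',
                                    meg=True, eeg=True, mindist=5.0)
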
diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py
index 94fd9e4..f9f66fc 100644
--- a/mne/gui/__init__.py
+++ b/mne/gui/__init__.py
@@ -21,8 +21,8 @@ def combine_kit_markers():
     return gui
 
 
-def coregistration(tabbed=False, split=True, scene_width=0o1, raw=None,
-                   subject=None, subjects_dir=None):
+def coregistration(tabbed=False, split=True, scene_width=0o1, inst=None,
+                   subject=None, subjects_dir=None, raw=None):
     """Coregister an MRI with a subject's head shape
 
     Parameters
@@ -35,8 +35,9 @@ def coregistration(tabbed=False, split=True, scene_width=0o1, raw=None,
         unnecessary for wx backend).
     scene_width : int
         Specify a minimum width for the 3d scene (in pixels).
-    raw : None | str(path)
-        Path to a raw file containing the digitizer data.
+    inst : None | str
+        Path to an instance file containing the digitizer data. Compatible
+        with Raw, Epochs, and Evoked files.
     subject : None | str
         Name of the mri subject.
     subjects_dir : None | path
@@ -53,9 +54,14 @@ def coregistration(tabbed=False, split=True, scene_width=0o1, raw=None,
     <http://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
     """
     _check_mayavi_version()
+    if raw is not None:
+        from warnings import warn
+        warn('The `raw` argument is deprecated in favor of `inst` and will '
+             'be removed in 0.11. Use `inst` instead.', DeprecationWarning)
+        inst = raw
     from ._coreg_gui import CoregFrame, _make_view
     view = _make_view(tabbed, split, scene_width)
-    gui = CoregFrame(raw, subject, subjects_dir)
+    gui = CoregFrame(inst, subject, subjects_dir)
     gui.configure_traits(view=view)
     return gui
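
A sketch of calling the GUI with the renamed argument; the file name is
hypothetical, and any Raw, Epochs, or Evoked FIF file with digitizer data
should work:

    import mne

    # `inst` replaces the deprecated `raw` keyword
    mne.gui.coregistration(inst='sample_audvis_raw.fif', subject='sample',
                           subjects_dir='/path/to/subjects')
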
 
diff --git a/mne/gui/_coreg_gui.py b/mne/gui/_coreg_gui.py
index 6b327a0..3a9493d 100644
--- a/mne/gui/_coreg_gui.py
+++ b/mne/gui/_coreg_gui.py
@@ -29,51 +29,26 @@ try:
     from tvtk.pyface.scene_editor import SceneEditor
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    Handler = object
-    cached_property = trait_wraith
-    on_trait_change = trait_wraith
-    MayaviScene = trait_wraith
-    MlabSceneModel = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    DelegatesTo = trait_wraith
-    Directory = trait_wraith
-    Enum = trait_wraith
-    Float = trait_wraith
-    Instance = trait_wraith
-    Int = trait_wraith
-    Property = trait_wraith
-    Str = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    Group = trait_wraith
-    HGroup = trait_wraith
-    VGroup = trait_wraith
-    VGrid = trait_wraith
-    EnumEditor = trait_wraith
-    Label = trait_wraith
-    TextEditor = trait_wraith
-    Action = trait_wraith
-    UndoButton = trait_wraith
-    CancelButton = trait_wraith
-    NoButtons = trait_wraith
-    SceneEditor = trait_wraith
+    HasTraits = HasPrivateTraits = Handler = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
+        Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
+        Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
+        EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
+        NoButtons = SceneEditor = trait_wraith
 
 
 from ..coreg import bem_fname, trans_fname
-from ..io.constants import FIFF
 from ..forward import prepare_bem_model
 from ..transforms import (write_trans, read_trans, apply_trans, rotation,
-                          translation, scaling, rotation_angles)
+                          translation, scaling, rotation_angles, Transform)
 from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
                      _point_cloud_error)
 from ..utils import get_subjects_dir, logger
 from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
-from ._file_traits import (set_mne_root, trans_wildcard, RawSource,
+from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
                            SubjectSelectorPanel)
-from ._viewer import defaults, HeadViewController, PointObject, SurfaceObject
+from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
+                      _testing_mode)
 
 
 laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
@@ -102,7 +77,7 @@ class CoregModel(HasPrivateTraits):
     """
     # data sources
     mri = Instance(MRIHeadWithFiducialsModel, ())
-    hsp = Instance(RawSource, ())
+    hsp = Instance(InstSource, ())
 
     # parameters
     grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
@@ -149,7 +124,7 @@ class CoregModel(HasPrivateTraits):
     can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
                                                        'subject_has_bem'])
     can_save = Property(Bool, depends_on=['head_mri_trans'])
-    raw_subject = Property(depends_on='hsp.raw_fname', desc="Subject guess "
+    raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
                            "based on the raw file name.")
 
     # transformed geometry
@@ -265,8 +240,7 @@ class CoregModel(HasPrivateTraits):
                 points[hair] += self.mri.norms[hair] * scaled_hair_dist
                 return points
             else:
-                msg = "Norms missing form bem, can't grow hair"
-                error(None, msg)
+                error(None, "Norms missing from bem, can't grow hair")
                 self.grow_hair = 0
         return self.mri.points
 
@@ -320,8 +294,8 @@ class CoregModel(HasPrivateTraits):
 
     @cached_property
     def _get_point_distance(self):
-        if (len(self.transformed_hsp_points) == 0
-                or len(self.transformed_mri_points) == 0):
+        if (len(self.transformed_hsp_points) == 0 or
+                len(self.transformed_mri_points) == 0):
             return
         dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
                       'euclidean')
@@ -343,9 +317,9 @@ class CoregModel(HasPrivateTraits):
         return "Average Points Error: %.1f mm" % (av_dist * 1000)
 
     def _get_raw_subject(self):
-        # subject name guessed based on the raw file name
-        if '_' in self.hsp.raw_fname:
-            subject, _ = self.hsp.raw_fname.split('_', 1)
+        # subject name guessed based on the inst file name
+        if '_' in self.hsp.inst_fname:
+            subject, _ = self.hsp.inst_fname.split('_', 1)
             if not subject:
                 subject = None
         else:
@@ -386,7 +360,7 @@ class CoregModel(HasPrivateTraits):
         mri_pts = self.transformed_mri_points
         point_distance = _point_cloud_error(hsp_pts, mri_pts)
         new_sub_filter = point_distance <= distance
-        n_excluded = np.sum(new_sub_filter == False)
+        n_excluded = np.sum(new_sub_filter == False)  # noqa
         logger.info("Coregistration: Excluding %i head shape points with "
                     "distance >= %.3f m.", n_excluded, distance)
 
@@ -600,10 +574,7 @@ class CoregModel(HasPrivateTraits):
         """
         if not self.can_save:
             raise RuntimeError("Not enough information for saving transform")
-        trans_matrix = self.head_mri_trans
-        trans = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,
-                 'trans': trans_matrix}
-        write_trans(fname, trans)
+        write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
 
 
 class CoregFrameHandler(Handler):
@@ -611,10 +582,9 @@ class CoregFrameHandler(Handler):
     """
     def close(self, info, is_ok):
         if info.object.queue.unfinished_tasks:
-            msg = ("Can not close the window while saving is still in "
-                   "progress. Please wait until all MRIs are processed.")
-            title = "Saving Still in Progress"
-            information(None, msg, title)
+            information(None, "Cannot close the window while saving is still "
+                        "in progress. Please wait until all MRIs are "
+                        "processed.", "Saving Still in Progress")
             return False
         else:
             return True
@@ -774,8 +744,8 @@ class CoregPanel(HasPrivateTraits):
                              Item('rot_z', editor=laggy_float_editor,
                                   show_label=True, tooltip="Rotate along "
                                   "anterior-posterior axis"),
-                                  'rot_z_dec', 'rot_z_inc',
-                                  show_labels=False, columns=4),
+                             'rot_z_dec', 'rot_z_inc',
+                             show_labels=False, columns=4),
                        # buttons
                        HGroup(Item('fit_hsp_points',
                                    enabled_when='has_pts_data',
@@ -972,7 +942,7 @@ class CoregPanel(HasPrivateTraits):
                                   subject_from=subject_from,
                                   subject_to=subject_to)
             ui = mridlg.edit_traits(kind='modal')
-            if ui.result != True:
+            if ui.result != True:  # noqa
                 return
             subject_to = mridlg.subject_to
 
@@ -1037,10 +1007,10 @@ class CoregPanel(HasPrivateTraits):
 
     def _scale_z_dec_fired(self):
         step = 1. / self.scale_step
-        self.scale_x *= step
+        self.scale_z *= step
 
     def _scale_z_inc_fired(self):
-        self.scale_x *= self.scale_step
+        self.scale_z *= self.scale_step
 
     def _trans_x_dec_fired(self):
         self.trans_x -= self.trans_step
@@ -1083,7 +1053,7 @@ class NewMriDialog(HasPrivateTraits):
                      "subject"),
                 width=500,
                 buttons=[CancelButton,
-                           Action(name='OK', enabled_when='can_save')])
+                         Action(name='OK', enabled_when='can_save')])
 
     def _can_overwrite_changed(self, new):
         if not new:
@@ -1174,7 +1144,7 @@ def _make_view(tabbed=False, split=False, scene_width=-1):
                                       show_labels=False),
                                Item('omitted_info', style='readonly',
                                     show_label=False),
-                               label='Head Shape Source (Raw)',
+                               label='Head Shape Source (Raw/Epochs/Evoked)',
                                show_border=True, show_labels=False),
                         show_labels=False, label="Data Source")
 
@@ -1360,7 +1330,8 @@ class CoregFrame(HasTraits):
         self.sync_trait('hsp_visible', p, 'visible', mutual=False)
 
         on_pick = self.scene.mayavi_scene.on_mouse_pick
-        self.picker = on_pick(self.fid_panel._on_pick, type='cell')
+        if not _testing_mode():
+            self.picker = on_pick(self.fid_panel._on_pick, type='cell')
 
         self.headview.left = True
         self.scene.disable_render = False
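
save_trans() above now writes a proper Transform instead of a hand-rolled
dict. A minimal sketch of the same pattern, assuming a 4x4 head-to-MRI
matrix (identity here, for illustration only):

    import numpy as np
    from mne.transforms import Transform, write_trans

    head_mri_t = Transform('head', 'mri', np.eye(4))
    write_trans('sample-trans.fif', head_mri_t)
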
diff --git a/mne/gui/_fiducials_gui.py b/mne/gui/_fiducials_gui.py
index a127480..e0a2ff2 100644
--- a/mne/gui/_fiducials_gui.py
+++ b/mne/gui/_fiducials_gui.py
@@ -7,7 +7,6 @@
 from glob import glob
 import os
 from ..externals.six.moves import map
-from ..externals.six.moves import zip
 
 # allow import without traits
 try:
@@ -23,26 +22,11 @@ try:
     from tvtk.pyface.scene_editor import SceneEditor
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    cached_property = trait_wraith
-    on_trait_change = trait_wraith
-    MayaviScene = trait_wraith
-    MlabSceneModel = trait_wraith
-    Array = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    DelegatesTo = trait_wraith
-    Enum = trait_wraith
-    Event = trait_wraith
-    Instance = trait_wraith
-    Property = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    HGroup = trait_wraith
-    VGroup = trait_wraith
-    SceneEditor = trait_wraith
-    NoButtons = trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
+        Array = Bool = Button = DelegatesTo = Enum = Event = Instance = \
+        Property = View = Item = HGroup = VGroup = SceneEditor = \
+        NoButtons = trait_wraith
 
 from ..coreg import fid_fname, fid_fname_general, head_bem_fname
 from ..io import write_fiducials
@@ -145,9 +129,9 @@ class MRIHeadWithFiducialsModel(HasPrivateTraits):
 
     @cached_property
     def _get_can_save_as(self):
-        can = not (np.all(self.nasion == self.lpa)
-                   or np.all(self.nasion == self.rpa)
-                   or np.all(self.lpa == self.rpa))
+        can = not (np.all(self.nasion == self.lpa) or
+                   np.all(self.nasion == self.rpa) or
+                   np.all(self.lpa == self.rpa))
         return can
 
     @cached_property
@@ -306,7 +290,9 @@ class FiducialsPanel(HasPrivateTraits):
         else:
             logger.debug("GUI: picked object other than MRI")
 
-        round_ = lambda x: round(x, 3)
+        def round_(x):
+            return round(x, 3)
+
         poss = [map(round_, pos) for pos in picker.picked_positions]
         pos = map(round_, picker.pick_position)
         msg = ["Pick Event: %i picked_positions:" % n_pos]
diff --git a/mne/gui/_file_traits.py b/mne/gui/_file_traits.py
index 6bc1b4a..fd59d7d 100644
--- a/mne/gui/_file_traits.py
+++ b/mne/gui/_file_traits.py
@@ -8,7 +8,6 @@ import os
 
 import numpy as np
 from ..externals.six.moves import map
-from ..externals.six.moves import zip
 
 # allow import without traits
 try:
@@ -21,30 +20,13 @@ try:
                             information)
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    cached_property = trait_wraith
-    on_trait_change = trait_wraith
-    Any = trait_wraith
-    Array = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    DelegatesTo = trait_wraith
-    Directory = trait_wraith
-    Enum = trait_wraith
-    Event = trait_wraith
-    File = trait_wraith
-    Instance = trait_wraith
-    Int = trait_wraith
-    List = trait_wraith
-    Property = trait_wraith
-    Str = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    VGroup = trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = Any = Array = Bool = Button = \
+        DelegatesTo = Directory = Enum = Event = File = Instance = \
+        Int = List = Property = Str = View = Item = VGroup = trait_wraith
 
 from ..io.constants import FIFF
-from ..io import Raw, read_fiducials
+from ..io import read_info, read_fiducials
 from ..surface import read_bem_surfaces
 from ..coreg import (_is_mri_subject, _mri_subject_has_bem,
                      create_default_subject)
@@ -76,24 +58,45 @@ def get_fs_home():
     If specified successfully, the resulting path is stored with
     mne.set_config().
     """
-    fs_home = get_config('FREESURFER_HOME')
-    problem = _fs_home_problem(fs_home)
+    return _get_root_home('FREESURFER_HOME', 'freesurfer', _fs_home_problem)
+
+
+def get_mne_root():
+    """Get the MNE_ROOT directory
+
+    Returns
+    -------
+    mne_root : None | str
+        The MNE_ROOT path or None if the user cancels.
+
+    Notes
+    -----
+    If MNE_ROOT can't be found, the user is prompted with a file dialog.
+    If specified successfully, the resulting path is stored with
+    mne.set_config().
+    """
+    return _get_root_home('MNE_ROOT', 'MNE', _mne_root_problem)
+
+
+def _get_root_home(cfg, name, check_fun):
+    root = get_config(cfg)
+    problem = check_fun(root)
     while problem:
-        info = ("Please select the FREESURFER_HOME directory. This is the "
-                "root directory of the freesurfer installation.")
+        info = ("Please select the %s directory. This is the root "
+                "directory of the %s installation." % (cfg, name))
         msg = '\n\n'.join((problem, info))
-        information(None, msg, "Select the FREESURFER_HOME Directory")
-        msg = "Please select the FREESURFER_HOME Directory"
+        information(None, msg, "Select the %s Directory" % cfg)
+        msg = "Please select the %s Directory" % cfg
         dlg = DirectoryDialog(message=msg, new_directory=False)
         if dlg.open() == OK:
-            fs_home = dlg.path
-            problem = _fs_home_problem(fs_home)
+            root = dlg.path
+            problem = check_fun(root)
             if problem is None:
-                set_config('FREESURFER_HOME', fs_home)
+                set_config(cfg, root)
         else:
             return None
+    return root
 
-    return fs_home
 
 def set_fs_home():
     """Set the FREESURFER_HOME environment variable
@@ -117,6 +120,7 @@ def set_fs_home():
         os.environ['FREESURFER_HOME'] = fs_home
         return True
 
+
 def _fs_home_problem(fs_home):
     """Check FREESURFER_HOME path
 
@@ -133,39 +137,6 @@ def _fs_home_problem(fs_home):
                     "subject." % fs_home)
 
 
-def get_mne_root():
-    """Get the MNE_ROOT directory
-
-    Returns
-    -------
-    mne_root : None | str
-        The MNE_ROOT path or None if the user cancels.
-
-    Notes
-    -----
-    If MNE_ROOT can't be found, the user is prompted with a file dialog.
-    If specified successfully, the resulting path is stored with
-    mne.set_config().
-    """
-    mne_root = get_config('MNE_ROOT')
-    problem = _mne_root_problem(mne_root)
-    while problem:
-        info = ("Please select the MNE_ROOT directory. This is the root "
-                "directory of the MNE installation.")
-        msg = '\n\n'.join((problem, info))
-        information(None, msg, "Select the MNE_ROOT Directory")
-        msg = "Please select the MNE_ROOT Directory"
-        dlg = DirectoryDialog(message=msg, new_directory=False)
-        if dlg.open() == OK:
-            mne_root = dlg.path
-            problem = _mne_root_problem(mne_root)
-            if problem is None:
-                set_config('MNE_ROOT', mne_root)
-        else:
-            return None
-
-    return mne_root
-
 def set_mne_root(set_mne_bin=False):
     """Set the MNE_ROOT environment variable
 
@@ -197,6 +168,7 @@ def set_mne_root(set_mne_bin=False):
                 os.environ['PATH'] += ':' + mne_bin
         return True
 
+
 def _mne_root_problem(mne_root):
     """Check MNE_ROOT path
 
@@ -291,8 +263,8 @@ class FiducialsSource(HasTraits):
         return points
 
 
-class RawSource(HasPrivateTraits):
-    """Expose measurement information from a raw file
+class InstSource(HasPrivateTraits):
+    """Expose measurement information from an inst file
 
     Parameters
     ----------
@@ -307,22 +279,23 @@ class RawSource(HasPrivateTraits):
     """
     file = File(exists=True, filter=['*.fif'])
 
-    raw_fname = Property(Str, depends_on='file')
-    raw_dir = Property(depends_on='file')
-    raw = Property(depends_on='file')
+    inst_fname = Property(Str, depends_on='file')
+    inst_dir = Property(depends_on='file')
+    inst = Property(depends_on='file')
 
     points_filter = Any(desc="Index to select a subset of the head shape "
                         "points")
     n_omitted = Property(Int, depends_on=['points_filter'])
 
     # head shape
-    raw_points = Property(depends_on='raw', desc="Head shape points in the "
-                          "raw file(n x 3 array)")
-    points = Property(depends_on=['raw_points', 'points_filter'], desc="Head "
+    inst_points = Property(depends_on='inst', desc="Head shape points in the "
+                           "inst file (n x 3 array)")
+    points = Property(depends_on=['inst_points', 'points_filter'], desc="Head "
                       "shape points selected by the filter (n x 3 array)")
 
     # fiducials
-    fid_dig = Property(depends_on='raw', desc="Fiducial points (list of dict)")
+    fid_dig = Property(depends_on='inst', desc="Fiducial points "
+                       "(list of dict)")
     fid_points = Property(depends_on='fid_dig', desc="Fiducial points {ident: "
                           "point} dict}")
     lpa = Property(depends_on='fid_points', desc="LPA coordinates (1 x 3 "
@@ -333,59 +306,59 @@ class RawSource(HasPrivateTraits):
                    "array)")
 
     view = View(VGroup(Item('file'),
-                       Item('raw_fname', show_label=False, style='readonly')))
+                       Item('inst_fname', show_label=False, style='readonly')))
 
     @cached_property
     def _get_n_omitted(self):
         if self.points_filter is None:
             return 0
         else:
-            return np.sum(self.points_filter == False)
+            return np.sum(self.points_filter == False)  # noqa
 
     @cached_property
-    def _get_raw(self):
+    def _get_inst(self):
         if self.file:
-            return Raw(self.file)
+            return read_info(self.file)
 
     @cached_property
-    def _get_raw_dir(self):
+    def _get_inst_dir(self):
         return os.path.dirname(self.file)
 
     @cached_property
-    def _get_raw_fname(self):
+    def _get_inst_fname(self):
         if self.file:
             return os.path.basename(self.file)
         else:
             return '-'
 
     @cached_property
-    def _get_raw_points(self):
-        if not self.raw:
+    def _get_inst_points(self):
+        if not self.inst:
             return np.zeros((1, 3))
 
-        points = np.array([d['r'] for d in self.raw.info['dig']
+        points = np.array([d['r'] for d in self.inst['dig']
                            if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
         return points
 
     @cached_property
     def _get_points(self):
         if self.points_filter is None:
-            return self.raw_points
+            return self.inst_points
         else:
-            return self.raw_points[self.points_filter]
+            return self.inst_points[self.points_filter]
 
     @cached_property
     def _get_fid_dig(self):
         """Fiducials for info['dig']"""
-        if not self.raw:
+        if not self.inst:
             return []
-        dig = self.raw.info['dig']
+        dig = self.inst['dig']
         dig = [d for d in dig if d['kind'] == FIFF.FIFFV_POINT_CARDINAL]
         return dig
 
     @cached_property
     def _get_fid_points(self):
-        if not self.raw:
+        if not self.inst:
             return {}
         digs = dict((d['ident'], d) for d in self.fid_dig)
         return digs
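
InstSource reads only the measurement info, which is why any Raw, Epochs,
or Evoked file can now supply the digitization. A sketch of the underlying
calls (file name hypothetical):

    from mne.io import read_info
    from mne.io.constants import FIFF

    info = read_info('sample_audvis_raw.fif')
    # extra (head shape) points, as in _get_inst_points() above
    hsp = [d['r'] for d in info['dig']
           if d['kind'] == FIFF.FIFFV_POINT_EXTRA]
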
diff --git a/mne/gui/_kit2fiff_gui.py b/mne/gui/_kit2fiff_gui.py
index f29a122..ee07198 100644
--- a/mne/gui/_kit2fiff_gui.py
+++ b/mne/gui/_kit2fiff_gui.py
@@ -5,11 +5,13 @@
 # License: BSD (3-clause)
 
 import os
-from ..externals.six.moves import queue
-from threading import Thread
-
 import numpy as np
 from scipy.linalg import inv
+from threading import Thread
+
+from ..externals.six.moves import queue
+from ..io.meas_info import _read_dig_points, _make_dig_points
+
 
 # allow import without traits
 try:
@@ -25,40 +27,19 @@ try:
     from tvtk.pyface.scene_editor import SceneEditor
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    Handler = object
-    cached_property = trait_wraith
-    MayaviScene = trait_wraith
-    MlabSceneModel = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    DelegatesTo = trait_wraith
-    Enum = trait_wraith
-    File = trait_wraith
-    Instance = trait_wraith
-    Int = trait_wraith
-    List = trait_wraith
-    Property = trait_wraith
-    Str = trait_wraith
-    Array = trait_wraith
-    spring = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    HGroup = trait_wraith
-    VGroup = trait_wraith
-    EnumEditor = trait_wraith
-    NoButtons = trait_wraith
-    CheckListEditor = trait_wraith
-    SceneEditor = trait_wraith
-
-from ..io.kit.coreg import read_hsp
+    HasTraits = HasPrivateTraits = Handler = object
+    cached_property = MayaviScene = MlabSceneModel = Bool = Button = \
+        DelegatesTo = Enum = File = Instance = Int = List = Property = \
+        Str = Array = spring = View = Item = HGroup = VGroup = EnumEditor = \
+        NoButtons = CheckListEditor = SceneEditor = trait_wraith
+
 from ..io.kit.kit import RawKIT, KIT
-from ..transforms import apply_trans, als_ras_trans, als_ras_trans_mm
-from ..coreg import (read_elp, _decimate_points, fit_matched_points,
-                     get_ras_to_neuromag_trans)
+from ..transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
+                          get_ras_to_neuromag_trans, Transform)
+from ..coreg import _decimate_points, fit_matched_points
 from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
-from ._viewer import HeadViewController, headview_item, PointObject
+from ._viewer import (HeadViewController, headview_item, PointObject,
+                      _testing_mode)
 
 
 use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
@@ -93,8 +74,8 @@ class Kit2FiffModel(HasPrivateTraits):
     stim_chs_manual = Array(int, (8,), range(168, 176))
     stim_slope = Enum("-", "+")
     # Marker Points
-    use_mrk = List(list(range(5)), desc="Which marker points to use for the device "
-                   "head coregistration.")
+    use_mrk = List(list(range(5)), desc="Which marker points to use for the "
+                   "device head coregistration.")
 
     # Derived Traits
     mrk = Property(depends_on=('markers.mrk3.points'))
@@ -127,8 +108,8 @@ class Kit2FiffModel(HasPrivateTraits):
         if not has_sqd:
             return False
 
-        has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp)
-                       and np.any(self.elp) and np.any(self.fid))
+        has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
+                       np.any(self.elp) and np.any(self.fid))
         if has_all_hsp:
             return True
 
@@ -170,7 +151,7 @@ class Kit2FiffModel(HasPrivateTraits):
             return
 
         try:
-            pts = read_elp(self.fid_file)
+            pts = _read_dig_points(self.fid_file)
             if len(pts) < 8:
                 raise ValueError("File contains %i points, need 8" % len(pts))
         except Exception as err:
@@ -202,7 +183,7 @@ class Kit2FiffModel(HasPrivateTraits):
     @cached_property
     def _get_hsp(self):
         if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
-            return  np.empty((0, 3))
+            return np.empty((0, 3))
         else:
             pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
             return pts
@@ -221,8 +202,7 @@ class Kit2FiffModel(HasPrivateTraits):
             return
 
         try:
-            pts = read_hsp(fname)
-
+            pts = _read_dig_points(fname)
             n_pts = len(pts)
             if n_pts > KIT.DIG_POINTS:
                 msg = ("The selected head shape contains {n_in} points, "
@@ -265,7 +245,7 @@ class Kit2FiffModel(HasPrivateTraits):
     def clear_all(self):
         """Clear all specified input parameters"""
         self.markers.clear = True
-        self.reset_traits(['sqd_file', 'hsp_file', 'fid_file'])
+        self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
 
     def get_event_info(self):
         """
@@ -297,8 +277,11 @@ class Kit2FiffModel(HasPrivateTraits):
                      slope=self.stim_slope)
 
         if np.any(self.fid):
-            raw._set_dig_neuromag(self.fid, self.elp, self.hsp,
-                                  self.dev_head_trans)
+            raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
+                                               self.fid[2], self.elp,
+                                               self.hsp)
+            raw.info['dev_head_t'] = Transform('meg', 'head',
+                                               self.dev_head_trans)
         return raw
 
 
@@ -354,51 +337,50 @@ class Kit2FiffPanel(HasPrivateTraits):
     queue_len_str = Property(Str, depends_on=['queue_len'])
     error = Str('')
 
-    view = View(VGroup(VGroup(Item('sqd_file', label="Data"),
-                              Item('sqd_fname', show_label=False,
-                                   style='readonly'),
-                              Item('hsp_file', label='Dig Head Shape'),
-                              Item('hsp_fname', show_label=False,
-                                   style='readonly'),
-                              Item('fid_file', label='Dig Points'),
-                              Item('fid_fname', show_label=False,
-                                   style='readonly'),
-                              Item('reset_dig', label='Clear Digitizer Files',
-                                   show_label=False),
-                              Item('use_mrk', editor=use_editor,
-                                   style='custom'),
-                              label="Sources", show_border=True),
-                    VGroup(Item('stim_slope', label="Event Onset",
-                                style='custom',
-                                editor=EnumEditor(
-                                           values={'+': '2:Peak (0 to 5 V)',
-                                                   '-': '1:Trough (5 to 0 V)'},
-                                           cols=2),
-                                help="Whether events are marked by a decrease "
-                                "(trough) or an increase (peak) in trigger "
-                                "channel values"),
-                           Item('stim_chs', label="Binary Coding",
-                                style='custom',
-                                editor=EnumEditor(values={'>': '1:1 ... 128',
-                                                          '<': '3:128 ... 1',
-                                                          'man': '2:Manual'},
-                                                  cols=2),
-                                help="Specifies the bit order in event "
-                                "channels. Assign the first bit (1) to the "
-                                "first or the last trigger channel."),
-                           Item('stim_chs_manual', label='Stim Channels',
-                                style='custom',
-                                visible_when="stim_chs == 'man'"),
-                           label='Events', show_border=True),
-                       HGroup(Item('save_as', enabled_when='can_save'), spring,
-                              'clear_all', show_labels=False),
-                       Item('queue_feedback', show_label=False,
-                            style='readonly'),
-                       Item('queue_current', show_label=False,
-                            style='readonly'),
-                       Item('queue_len_str', show_label=False,
-                            style='readonly'),
-                       ))
+    view = View(
+        VGroup(VGroup(Item('sqd_file', label="Data"),
+                      Item('sqd_fname', show_label=False,
+                           style='readonly'),
+                      Item('hsp_file', label='Dig Head Shape'),
+                      Item('hsp_fname', show_label=False,
+                           style='readonly'),
+                      Item('fid_file', label='Dig Points'),
+                      Item('fid_fname', show_label=False,
+                           style='readonly'),
+                      Item('reset_dig', label='Clear Digitizer Files',
+                           show_label=False),
+                      Item('use_mrk', editor=use_editor,
+                           style='custom'),
+                      label="Sources", show_border=True),
+               VGroup(Item('stim_slope', label="Event Onset",
+                           style='custom',
+                           editor=EnumEditor(
+                               values={'+': '2:Peak (0 to 5 V)',
+                                       '-': '1:Trough (5 to 0 V)'},
+                               cols=2),
+                           help="Whether events are marked by a decrease "
+                           "(trough) or an increase (peak) in trigger "
+                           "channel values"),
+                      Item('stim_chs', label="Binary Coding",
+                           style='custom',
+                           editor=EnumEditor(values={'>': '1:1 ... 128',
+                                                     '<': '3:128 ... 1',
+                                                     'man': '2:Manual'},
+                                             cols=2),
+                           help="Specifies the bit order in event "
+                           "channels. Assign the first bit (1) to the "
+                           "first or the last trigger channel."),
+                      Item('stim_chs_manual', label='Stim Channels',
+                           style='custom',
+                           visible_when="stim_chs == 'man'"),
+                      label='Events', show_border=True),
+               HGroup(Item('save_as', enabled_when='can_save'), spring,
+                      'clear_all', show_labels=False),
+               Item('queue_feedback', show_label=False, style='readonly'),
+               Item('queue_current', show_label=False, style='readonly'),
+               Item('queue_len_str', show_label=False, style='readonly')
+               )
+    )
 
     def __init__(self, *args, **kwargs):
         super(Kit2FiffPanel, self).__init__(*args, **kwargs)
@@ -433,21 +415,17 @@ class Kit2FiffPanel(HasPrivateTraits):
         m = self.model
         self.fid_obj = PointObject(scene=self.scene, color=(25, 225, 25),
                                    point_scale=5e-3)
-        m.sync_trait('fid', self.fid_obj, 'points', mutual=False)
-        m.sync_trait('head_dev_trans', self.fid_obj, 'trans', mutual=False)
-
         self.elp_obj = PointObject(scene=self.scene, color=(50, 50, 220),
                                    point_scale=1e-2, opacity=.2)
-        m.sync_trait('elp', self.elp_obj, 'points', mutual=False)
-        m.sync_trait('head_dev_trans', self.elp_obj, 'trans', mutual=False)
-
         self.hsp_obj = PointObject(scene=self.scene, color=(200, 200, 200),
                                    point_scale=2e-3)
-        m.sync_trait('hsp', self.hsp_obj, 'points', mutual=False)
-        m.sync_trait('head_dev_trans', self.hsp_obj, 'trans', mutual=False)
-
-        self.scene.camera.parallel_scale = 0.15
-        self.scene.mlab.view(0, 0, .15)
+        if not _testing_mode():
+            for name, obj in zip(['fid', 'elp', 'hsp'],
+                                 [self.fid_obj, self.elp_obj, self.hsp_obj]):
+                m.sync_trait(name, obj, 'points', mutual=False)
+                m.sync_trait('head_dev_trans', obj, 'trans', mutual=False)
+            self.scene.camera.parallel_scale = 0.15
+            self.scene.mlab.view(0, 0, .15)
 
     def _clear_all_fired(self):
         self.model.clear_all()
@@ -515,7 +493,7 @@ class Kit2FiffFrame(HasTraits):
                        VGroup(Item('kit2fiff_panel', style='custom'),
                               show_labels=False),
                        show_labels=False,
-                      ),
+                       ),
                 handler=Kit2FiffFrameHandler(),
                 height=700, resizable=True, buttons=NoButtons)
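
Kit2FiffModel now fills info['dig'] and info['dev_head_t'] directly rather
than going through a private Raw method. A minimal sketch of the dev_head_t
half, assuming a 4x4 device-to-head matrix:

    import numpy as np
    from mne.transforms import Transform

    dev_head_trans = np.eye(4)  # placeholder matrix, illustration only
    dev_head_t = Transform('meg', 'head', dev_head_trans)
    # assigned to raw.info['dev_head_t'] in the model code above
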
 
diff --git a/mne/gui/_marker_gui.py b/mne/gui/_marker_gui.py
index 86e631e..835a206 100644
--- a/mne/gui/_marker_gui.py
+++ b/mne/gui/_marker_gui.py
@@ -21,34 +21,16 @@ try:
     from tvtk.pyface.scene_editor import SceneEditor
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    cached_property = trait_wraith
-    on_trait_change = trait_wraith
-    MayaviScene = trait_wraith
-    MlabSceneModel = trait_wraith
-    Array = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    Enum = trait_wraith
-    File = trait_wraith
-    Float = trait_wraith
-    Instance = trait_wraith
-    Int = trait_wraith
-    List = trait_wraith
-    Property = trait_wraith
-    Str = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    HGroup = trait_wraith
-    VGroup = trait_wraith
-    CheckListEditor = trait_wraith
-    NoButtons = trait_wraith
-    SceneEditor = trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
+        Array = Bool = Button = Enum = File = Float = Instance = Int = \
+        List = Property = Str = View = Item = HGroup = VGroup = \
+        CheckListEditor = NoButtons = SceneEditor = trait_wraith
 
 from ..transforms import apply_trans, rotation, translation
 from ..coreg import fit_matched_points
-from ..io.kit import read_mrk, write_mrk
+from ..io.kit import read_mrk
+from ..io.meas_info import _write_dig_points
 from ._viewer import HeadViewController, headview_borders, PointObject
 
 
@@ -59,40 +41,39 @@ if backend_is_wx:
                     'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk',
                     'Text marker file (*.txt)|*.txt',
                     'Pickled markers (*.pickled)|*.pickled']
-    mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt",
-                        "Pickled KIT parameters (*.pickled)|*.pickled"]
+    mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt"]
 else:
     mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"]
-    mrk_out_wildcard = ["*.txt;*.pickled"]
-out_ext = ['.txt', '.pickled']
+    mrk_out_wildcard = "*.txt"
+out_ext = '.txt'
 
 
 use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
 use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
 
 mrk_view_editable = View(
-        VGroup('file',
-               Item('name', show_label=False, style='readonly'),
-               HGroup(
-                      Item('use', editor=use_editor_v, enabled_when="enabled",
-                           style='custom'),
-                      'points',
-                      ),
-               HGroup(Item('clear', enabled_when="can_save", show_label=False),
-                      Item('save_as', enabled_when="can_save",
-                           show_label=False)),
-                  ))
+    VGroup('file',
+           Item('name', show_label=False, style='readonly'),
+           HGroup(
+               Item('use', editor=use_editor_v, enabled_when="enabled",
+                    style='custom'),
+               'points',
+           ),
+           HGroup(Item('clear', enabled_when="can_save", show_label=False),
+                  Item('save_as', enabled_when="can_save",
+                       show_label=False)),
+           ))
 
 mrk_view_basic = View(
-        VGroup('file',
-               Item('name', show_label=False, style='readonly'),
-               Item('use', editor=use_editor_h, enabled_when="enabled",
-                    style='custom'),
-               HGroup(Item('clear', enabled_when="can_save", show_label=False),
-                      Item('edit', show_label=False),
-                      Item('save_as', enabled_when="can_save",
-                           show_label=False)),
-                  ))
+    VGroup('file',
+           Item('name', show_label=False, style='readonly'),
+           Item('use', editor=use_editor_h, enabled_when="enabled",
+                style='custom'),
+           HGroup(Item('clear', enabled_when="can_save", show_label=False),
+                  Item('edit', show_label=False),
+                  Item('save_as', enabled_when="can_save",
+                       show_label=False)),
+           ))
 
 mrk_view_edit = View(VGroup('points'))
 
@@ -119,15 +100,16 @@ class MarkerPoints(HasPrivateTraits):
         if dlg.return_code != OK:
             return
 
-        ext = out_ext[dlg.wildcard_index]
-        path = dlg.path
-        if not path.endswith(ext):
-            path = path + ext
-            if os.path.exists(path):
-                answer = confirm(None, "The file %r already exists. Should it "
-                                 "be replaced?", "Overwrite File?")
-                if answer != YES:
-                    return
+        path, ext = os.path.splitext(dlg.path)
+        if not path.endswith(out_ext) and len(ext) != 0:
+            raise ValueError("The extension '%s' is not supported." % ext)
+        path = path + out_ext
+
+        if os.path.exists(path):
+            answer = confirm(None, "The file %r already exists. Should it "
+                             "be replaced?" % path, "Overwrite File?")
+            if answer != YES:
+                return
         self.save(path)
 
     def save(self, path):
@@ -140,7 +122,7 @@ class MarkerPoints(HasPrivateTraits):
             based on the extension: '.txt' for tab separated text file,
             '.pickled' for pickled file.
         """
-        write_mrk(path, self.points)
+        _write_dig_points(path, self.points)
 
 
 class MarkerPointSource(MarkerPoints):
@@ -342,9 +324,9 @@ class CombineMarkersModel(HasPrivateTraits):
 
     @cached_property
     def _get_distance(self):
-        if (self.mrk1 is None or self.mrk2 is None
-            or (not np.any(self.mrk1.points))
-            or (not np.any(self.mrk2.points))):
+        if (self.mrk1 is None or self.mrk2 is None or
+                (not np.any(self.mrk1.points)) or
+                (not np.any(self.mrk2.points))):
             return ""
 
         ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1))
@@ -406,14 +388,14 @@ class CombineMarkersPanel(HasTraits):
         self.sync_trait('trans', self.mrk1_obj, mutual=False)
         m.mrk1.sync_trait('points', self.mrk1_obj, 'points', mutual=False)
         m.mrk1.sync_trait('enabled', self.mrk1_obj, 'visible',
-                                   mutual=False)
+                          mutual=False)
 
         self.mrk2_obj = PointObject(scene=self.scene, color=(55, 155, 55),
                                     point_scale=self.scale)
         self.sync_trait('trans', self.mrk2_obj, mutual=False)
         m.mrk2.sync_trait('points', self.mrk2_obj, 'points', mutual=False)
         m.mrk2.sync_trait('enabled', self.mrk2_obj, 'visible',
-                                   mutual=False)
+                          mutual=False)
 
         self.mrk3_obj = PointObject(scene=self.scene, color=(150, 200, 255),
                                     point_scale=self.scale)
@@ -448,7 +430,6 @@ class CombineMarkersFrame(HasTraits):
                               Item('panel', style="custom"),
                               show_labels=False),
                        show_labels=False,
-                      ),
+                       ),
                 width=1100, resizable=True,
                 buttons=NoButtons)
-
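
The save-as handler above now forces a .txt extension instead of offering
pickled output. A standalone sketch of that normalization, under the same
assumptions (helper name hypothetical):

    import os.path

    def _force_txt(path, out_ext='.txt'):
        # strip whatever extension the user typed; only .txt is written
        root, ext = os.path.splitext(path)
        if ext and ext != out_ext:
            raise ValueError("The extension '%s' is not supported." % ext)
        return root + out_ext
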
diff --git a/mne/gui/_viewer.py b/mne/gui/_viewer.py
index 6b57023..f90a219 100644
--- a/mne/gui/_viewer.py
+++ b/mne/gui/_viewer.py
@@ -4,6 +4,7 @@
 #
 # License: BSD (3-clause)
 
+import os
 import numpy as np
 
 # allow import without traits
@@ -20,32 +21,11 @@ try:
     from traitsui.api import View, Item, Group, HGroup, VGrid, VGroup
 except:
     from ..utils import trait_wraith
-    HasTraits = object
-    HasPrivateTraits = object
-    cached_property = trait_wraith
-    on_trait_change = trait_wraith
-    MlabSceneModel = trait_wraith
-    Array = trait_wraith
-    Bool = trait_wraith
-    Button = trait_wraith
-    Color = trait_wraith
-    Enum = trait_wraith
-    Float = trait_wraith
-    Instance = trait_wraith
-    Int = trait_wraith
-    List = trait_wraith
-    Property = trait_wraith
-    Range = trait_wraith
-    Str = trait_wraith
-    View = trait_wraith
-    Item = trait_wraith
-    Group = trait_wraith
-    HGroup = trait_wraith
-    VGrid = trait_wraith
-    VGroup = trait_wraith
-    Glyph = trait_wraith
-    Surface = trait_wraith
-    VTKDataSource = trait_wraith
+    HasTraits = HasPrivateTraits = object
+    cached_property = on_trait_change = MlabSceneModel = Array = Bool = \
+        Button = Color = Enum = Float = Instance = Int = List = Property = \
+        Range = Str = View = Item = Group = HGroup = VGrid = VGroup = \
+        Glyph = Surface = VTKDataSource = trait_wraith
 
 from ..transforms import apply_trans
 
@@ -60,6 +40,11 @@ defaults = {'mri_fid_scale': 1e-2, 'hsp_fid_scale': 3e-2,
             'rpa_color': (0, 0, 255)}
 
 
+def _testing_mode():
+    """Helper to determine if we're running tests"""
+    return (os.getenv('_MNE_GUI_TESTING_MODE', '') == 'true')
+
+
 class HeadViewController(HasTraits):
     """
     Set head views for Anterior-Left-Superior coordinate system
@@ -138,8 +123,9 @@ class HeadViewController(HasTraits):
         if kwargs is None:
             raise ValueError("Invalid view: %r" % view)
 
-        self.scene.mlab.view(distance=None, reset_roll=True,
-                             figure=self.scene.mayavi_scene, **kwargs)
+        if not _testing_mode():
+            self.scene.mlab.view(distance=None, reset_roll=True,
+                                 figure=self.scene.mayavi_scene, **kwargs)
 
 
 class Object(HasPrivateTraits):
@@ -255,7 +241,10 @@ class PointObject(Object):
         if hasattr(self.src, 'remove'):
             self.src.remove()
 
-        fig = self.scene.mayavi_scene
+        if not _testing_mode():
+            fig = self.scene.mayavi_scene
+        else:
+            fig = None
 
         x, y, z = self.points.T
         scatter = pipeline.scalar_scatter(x, y, z)
@@ -310,7 +299,7 @@ class SurfaceObject(Object):
     @on_trait_change('scene.activated')
     def plot(self):
         """Add the points to the mayavi pipeline"""
-        _scale = self.scene.camera.parallel_scale
+        _scale = self.scene.camera.parallel_scale if not _testing_mode() else 1
         self.clear()
 
         if not np.any(self.tri):
@@ -338,4 +327,5 @@ class SurfaceObject(Object):
                         mutual=False)
         self.sync_trait('opacity', self.surf.actor.property, 'opacity')
 
-        self.scene.camera.parallel_scale = _scale
+        if not _testing_mode():
+            self.scene.camera.parallel_scale = _scale
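
The _testing_mode() guards allow the viewer classes to be instantiated
headlessly. A sketch of how a test enables that mode (mirroring
test_coreg_gui.py below):

    import os

    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    try:
        pass  # build CoregFrame etc. without touching the Mayavi camera
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']
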
diff --git a/mne/gui/tests/test_coreg_gui.py b/mne/gui/tests/test_coreg_gui.py
index 65f4cd7..a82e09d 100644
--- a/mne/gui/tests/test_coreg_gui.py
+++ b/mne/gui/tests/test_coreg_gui.py
@@ -2,7 +2,6 @@
 #
 # License: BSD (3-clause)
 
-from ...externals.six import string_types
 import os
 
 import numpy as np
@@ -12,27 +11,30 @@ from nose.tools import (assert_equal, assert_almost_equal, assert_false,
 import warnings
 
 import mne
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.io.kit.tests import data_dir as kit_data_dir
-from mne.utils import _TempDir, requires_traits, requires_mne_fs_in_env
+from mne.utils import (_TempDir, requires_traits, requires_mne,
+                       requires_freesurfer, run_tests_if_main)
+from mne.externals.six import string_types
 
 
-data_path = sample.data_path(download=False)
-raw_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
+data_path = testing.data_path(download=False)
+raw_path = os.path.join(data_path, 'MEG', 'sample',
+                        'sample_audvis_trunc_raw.fif')
+fname_trans = os.path.join(data_path, 'MEG', 'sample',
+                           'sample_audvis_trunc-trans.fif')
 kit_raw_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
 subjects_dir = os.path.join(data_path, 'subjects')
 warnings.simplefilter('always')
 
-tempdir = _TempDir()
 
-trans_dst = os.path.join(tempdir, 'test-trans.fif')
-
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
 def test_coreg_model():
     """Test CoregModel"""
     from mne.gui._coreg_gui import CoregModel
+    tempdir = _TempDir()
+    trans_dst = os.path.join(tempdir, 'test-trans.fif')
 
     model = CoregModel()
     assert_raises(RuntimeError, model.save_trans, 'blah.fif')
@@ -64,8 +66,8 @@ def test_coreg_model():
 
     model.fit_fiducials()
     old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
-    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2
-             + model.nasion_distance ** 2)
+    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
+             model.nasion_distance ** 2)
     assert_true(new_x < old_x)
 
     model.fit_hsp_points()
@@ -99,12 +101,27 @@ def test_coreg_model():
     assert_true(isinstance(model.fid_eval_str, string_types))
     assert_true(isinstance(model.points_eval_str, string_types))
 
+    model.get_prepare_bem_model_job('sample')
+    model.load_trans(fname_trans)
+
+    from mne.gui._coreg_gui import CoregFrame
+    x = CoregFrame(raw_path, 'sample', subjects_dir)
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits spews warnings
+            warnings.simplefilter('always')
+            x._init_plot()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
 
-@sample.requires_sample_data
+
+@testing.requires_testing_data
 @requires_traits
-@requires_mne_fs_in_env
+@requires_mne
+@requires_freesurfer
 def test_coreg_model_with_fsaverage():
     """Test CoregModel"""
+    tempdir = _TempDir()
     from mne.gui._coreg_gui import CoregModel
 
     mne.create_default_subject(subjects_dir=tempdir)
@@ -140,8 +157,8 @@ def test_coreg_model_with_fsaverage():
 
     model.fit_scale_fiducials()
     old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
-    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2
-             + model.nasion_distance ** 2)
+    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
+             model.nasion_distance ** 2)
     assert_true(new_x < old_x)
 
     model.fit_scale_hsp_points()
@@ -165,3 +182,6 @@ def test_coreg_model_with_fsaverage():
     with warnings.catch_warnings(record=True):
         model.hsp.file = kit_raw_path
     assert_equal(model.hsp.n_omitted, 0)
+
+
+run_tests_if_main()
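
Note that the module-level _TempDir() and output paths were moved into the
test bodies, so each test gets a fresh directory and importing the module
has no side effects. The pattern, sketched:

    from mne.utils import _TempDir

    def test_something():
        tempdir = _TempDir()  # fresh per-test dir, removed automatically
        # ... write any outputs under tempdir ...
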
diff --git a/mne/gui/tests/test_fiducials_gui.py b/mne/gui/tests/test_fiducials_gui.py
index dfe1413..4eea1f7 100644
--- a/mne/gui/tests/test_fiducials_gui.py
+++ b/mne/gui/tests/test_fiducials_gui.py
@@ -7,21 +7,20 @@ import os
 from numpy.testing import assert_array_equal
 from nose.tools import assert_true, assert_false, assert_equal
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.utils import _TempDir, requires_traits
 
-sample_path = sample.data_path(download=False)
+sample_path = testing.data_path(download=False)
 subjects_dir = os.path.join(sample_path, 'subjects')
 
-tempdir = _TempDir()
-tgt_fname = os.path.join(tempdir, 'test-fiducials.fif')
 
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
 def test_mri_model():
     """Test MRIHeadWithFiducialsModel Traits Model"""
     from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test-fiducials.fif')
 
     model = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir)
     model.subject = 'sample'
@@ -29,8 +28,8 @@ def test_mri_model():
     assert_false(model.can_reset)
     assert_false(model.can_save)
     model.lpa = [[-1, 0, 0]]
-    model.nasion = [[ 0, 1, 0]]
-    model.rpa = [[ 1, 0, 0]]
+    model.nasion = [[0, 1, 0]]
+    model.rpa = [[1, 0, 0]]
     assert_false(model.can_reset)
     assert_true(model.can_save)
 
@@ -44,8 +43,8 @@ def test_mri_model():
     # resetting the file should not affect the model's fiducials
     model.fid_file = ''
     assert_array_equal(model.lpa, [[-1, 0, 0]])
-    assert_array_equal(model.nasion, [[ 0, 1, 0]])
-    assert_array_equal(model.rpa, [[ 1, 0, 0]])
+    assert_array_equal(model.nasion, [[0, 1, 0]])
+    assert_array_equal(model.rpa, [[1, 0, 0]])
 
     # reset model
     model.lpa = [[0, 0, 0]]
@@ -58,11 +57,11 @@ def test_mri_model():
     # loading the file should assign the model's fiducials
     model.fid_file = tgt_fname
     assert_array_equal(model.lpa, [[-1, 0, 0]])
-    assert_array_equal(model.nasion, [[ 0, 1, 0]])
-    assert_array_equal(model.rpa, [[ 1, 0, 0]])
+    assert_array_equal(model.nasion, [[0, 1, 0]])
+    assert_array_equal(model.rpa, [[1, 0, 0]])
 
     # after changing from file model should be able to reset
     model.nasion = [[1, 1, 1]]
     assert_true(model.can_reset)
     model.reset = True
-    assert_array_equal(model.nasion, [[ 0, 1, 0]])
+    assert_array_equal(model.nasion, [[0, 1, 0]])
diff --git a/mne/gui/tests/test_file_traits.py b/mne/gui/tests/test_file_traits.py
index 096f438..ea90fb1 100644
--- a/mne/gui/tests/test_file_traits.py
+++ b/mne/gui/tests/test_file_traits.py
@@ -8,19 +8,20 @@ from numpy import array
 from numpy.testing import assert_allclose
 from nose.tools import assert_equal, assert_false, assert_raises, assert_true
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.io.tests import data_dir as fiff_data_dir
-from mne.utils import _TempDir, requires_mne_fs_in_env, requires_traits
+from mne.utils import (_TempDir, requires_mne, requires_freesurfer,
+                       requires_traits)
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 subjects_dir = os.path.join(data_path, 'subjects')
-bem_path = os.path.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem.fif')
-raw_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
+bem_path = os.path.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem.fif')
+inst_path = os.path.join(data_path, 'MEG', 'sample',
+                         'sample_audvis_trunc_raw.fif')
 fid_path = os.path.join(fiff_data_dir, 'fsaverage-fiducials.fif')
-tempdir = _TempDir()
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
 def test_bem_source():
     """Test BemSource"""
@@ -31,11 +32,11 @@ def test_bem_source():
     assert_equal(bem.tris.shape, (0, 3))
 
     bem.file = bem_path
-    assert_equal(bem.points.shape, (2562, 3))
-    assert_equal(bem.tris.shape, (5120, 3))
+    assert_equal(bem.points.shape, (642, 3))
+    assert_equal(bem.tris.shape, (1280, 3))
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
 def test_fiducials_source():
     """Test FiducialsSource"""
@@ -45,35 +46,35 @@ def test_fiducials_source():
     fid.file = fid_path
 
     points = array([[-0.08061612, -0.02908875, -0.04131077],
-                    [ 0.00146763, 0.08506715, -0.03483611],
-                    [ 0.08436285, -0.02850276, -0.04127743]])
+                    [0.00146763, 0.08506715, -0.03483611],
+                    [0.08436285, -0.02850276, -0.04127743]])
     assert_allclose(fid.points, points, 1e-6)
 
     fid.file = ''
     assert_equal(fid.points, None)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
-def test_raw_source():
-    """Test RawSource"""
-    from mne.gui._file_traits import RawSource
+def test_inst_source():
+    """Test InstSource"""
+    from mne.gui._file_traits import InstSource
 
-    raw = RawSource()
-    assert_equal(raw.raw_fname, '-')
+    inst = InstSource()
+    assert_equal(inst.inst_fname, '-')
 
-    raw.file = raw_path
-    assert_equal(raw.raw_dir, os.path.dirname(raw_path))
+    inst.file = inst_path
+    assert_equal(inst.inst_dir, os.path.dirname(inst_path))
 
-    lpa = array([[ -7.13766068e-02, 0.00000000e+00, 5.12227416e-09]])
-    nasion = array([[  3.72529030e-09, 1.02605611e-01, 4.19095159e-09]])
-    rpa = array([[  7.52676800e-02, 0.00000000e+00, 5.58793545e-09]])
-    assert_allclose(raw.lpa, lpa)
-    assert_allclose(raw.nasion, nasion)
-    assert_allclose(raw.rpa, rpa)
+    lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]])
+    nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]])
+    rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]])
+    assert_allclose(inst.lpa, lpa)
+    assert_allclose(inst.nasion, nasion)
+    assert_allclose(inst.rpa, rpa)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
 def test_subject_source():
     """Test SubjectSelector"""
@@ -85,12 +86,14 @@ def test_subject_source():
     mri.subject = 'sample'
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_traits
-@requires_mne_fs_in_env
+@requires_mne
+@requires_freesurfer
 def test_subject_source_with_fsaverage():
     """Test SubjectSelector"""
     from mne.gui._file_traits import MRISubjectSource
+    tempdir = _TempDir()
 
     mri = MRISubjectSource()
     assert_false(mri.can_create_fsaverage)
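
These modules now resolve all paths through the lightweight testing
dataset instead of the full sample dataset. The convention, sketched
minimally (test_something is hypothetical):

    from mne.datasets import testing

    data_path = testing.data_path(download=False)  # never downloads

    @testing.requires_testing_data
    def test_something():
        pass  # skipped automatically when the dataset is absent
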
diff --git a/mne/gui/tests/test_kit2fiff_gui.py b/mne/gui/tests/test_kit2fiff_gui.py
index f391629..4e7d90a 100644
--- a/mne/gui/tests/test_kit2fiff_gui.py
+++ b/mne/gui/tests/test_kit2fiff_gui.py
@@ -3,6 +3,7 @@
 # License: BSD (3-clause)
 
 import os
+import warnings
 
 import numpy as np
 from numpy.testing import assert_allclose, assert_array_equal
@@ -11,7 +12,7 @@ from nose.tools import assert_true, assert_false, assert_equal
 import mne
 from mne.io.kit.tests import data_dir as kit_data_dir
 from mne.io import Raw
-from mne.utils import _TempDir, requires_traits
+from mne.utils import _TempDir, requires_traits, run_tests_if_main
 
 mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
 mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
@@ -20,15 +21,15 @@ hsp_path = os.path.join(kit_data_dir, 'test_hsp.txt')
 fid_path = os.path.join(kit_data_dir, 'test_elp.txt')
 fif_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
 
-tempdir = _TempDir()
-tgt_fname = os.path.join(tempdir, 'test-raw.fif')
-std_fname = os.path.join(tempdir, 'test_std-raw.fif')
+warnings.simplefilter('always')
 
 
 @requires_traits
 def test_kit2fiff_model():
     """Test CombineMarkersModel Traits Model"""
-    from mne.gui._kit2fiff_gui import Kit2FiffModel
+    from mne.gui._kit2fiff_gui import Kit2FiffModel, Kit2FiffPanel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test-raw.fif')
 
     model = Kit2FiffModel()
     assert_false(model.can_save)
@@ -48,7 +49,8 @@ def test_kit2fiff_model():
     # Compare exported raw with the original binary conversion
     raw_bin = Raw(fif_path)
     trans_bin = raw.info['dev_head_t']['trans']
-    assert_equal(raw_bin.info.keys(), raw.info.keys())
+    want_keys = list(raw_bin.info.keys())
+    assert_equal(sorted(want_keys), sorted(list(raw.info.keys())))
     trans_transform = raw_bin.info['dev_head_t']['trans']
     assert_allclose(trans_transform, trans_bin, 0.1)
 
@@ -86,3 +88,19 @@ def test_kit2fiff_model():
     raw = model.get_raw()
     events = mne.find_events(raw, stim_channel='STI 014')
     assert_array_equal(events, events_bin)
+
+    # test reset
+    model.clear_all()
+    assert_equal(model.use_mrk, [0, 1, 2, 3, 4])
+    assert_equal(model.sqd_file, "")
+
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits warnings
+            warnings.simplefilter('always')
+            Kit2FiffPanel()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
+
+
+run_tests_if_main()
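
The GUI tests above share one guard pattern: set _MNE_GUI_TESTING_MODE
so the traits-based GUI code avoids starting a real event loop, silence
the warnings traits emits, and remove the variable again even if
instantiation fails. In isolation (requires the traits GUI
dependencies; Kit2FiffPanel stands in for any of the panels tested):

    import os
    import warnings

    from mne.gui._kit2fiff_gui import Kit2FiffPanel

    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    try:
        with warnings.catch_warnings(record=True):  # traits warnings
            warnings.simplefilter('always')
            Kit2FiffPanel()
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']
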
diff --git a/mne/gui/tests/test_marker_gui.py b/mne/gui/tests/test_marker_gui.py
index 39e0c89..974d965 100644
--- a/mne/gui/tests/test_marker_gui.py
+++ b/mne/gui/tests/test_marker_gui.py
@@ -3,6 +3,7 @@
 # License: BSD (3-clause)
 
 import os
+import warnings
 
 import numpy as np
 from numpy.testing import assert_array_equal
@@ -10,20 +11,21 @@ from nose.tools import assert_true, assert_false
 
 from mne.io.kit.tests import data_dir as kit_data_dir
 from mne.io.kit import read_mrk
-from mne.utils import _TempDir, requires_traits
+from mne.utils import _TempDir, requires_traits, run_tests_if_main
 
 mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
 mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
 mrk_avg_path = os.path.join(kit_data_dir, 'test_mrk.sqd')
 
-tempdir = _TempDir()
-tgt_fname = os.path.join(tempdir, 'test.txt')
+warnings.simplefilter('always')
 
 
 @requires_traits
 def test_combine_markers_model():
     """Test CombineMarkersModel Traits Model"""
-    from mne.gui._marker_gui import CombineMarkersModel
+    from mne.gui._marker_gui import CombineMarkersModel, CombineMarkersPanel
+    tempdir = _TempDir()
+    tgt_fname = os.path.join(tempdir, 'test.txt')
 
     model = CombineMarkersModel()
 
@@ -68,3 +70,14 @@ def test_combine_markers_model():
     model.mrk1.file = mrk_pre_path
     model.mrk2.file = mrk_post_path
     assert_array_equal(model.mrk3.points, points_interpolate_mrk1_mrk2)
+
+    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
+    try:
+        with warnings.catch_warnings(record=True):  # traits warnings
+            warnings.simplefilter('always')
+            CombineMarkersPanel()
+    finally:
+        del os.environ['_MNE_GUI_TESTING_MODE']
+
+
+run_tests_if_main()
diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py
index 01d11e5..d615b50 100644
--- a/mne/inverse_sparse/_gamma_map.py
+++ b/mne/inverse_sparse/_gamma_map.py
@@ -7,10 +7,12 @@ import numpy as np
 from scipy import linalg
 
 from ..forward import is_fixed_orient, _to_fixed_ori
-from ..io.pick import pick_channels_evoked
-from ..minimum_norm.inverse import _prepare_forward
+
+from ..minimum_norm.inverse import _check_reference
 from ..utils import logger, verbose
-from .mxne_inverse import _make_sparse_stc, _prepare_gain
+from ..externals.six.moves import xrange as range
+from .mxne_inverse import (_make_sparse_stc, _prepare_gain,
+                           _reapply_source_weighting, _compute_residual)
 
 
 @verbose
@@ -84,9 +86,10 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
         denom_fun = np.sqrt
     else:
         # do nothing
-        denom_fun = lambda x: x
+        def denom_fun(x):
+            return x
 
-    for itno in np.arange(maxit):
+    for itno in range(maxit):
         gammas[np.isnan(gammas)] = 0.0
 
         gidx = (np.abs(gammas) > eps)
@@ -110,11 +113,11 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
 
         if update_mode == 1:
             # MacKay fixed point update (10) in [1]
-            numer = gammas ** 2 * np.mean(np.abs(A) ** 2, axis=1)
+            numer = gammas ** 2 * np.mean((A * A.conj()).real, axis=1)
             denom = gammas * np.sum(G * CMinvG, axis=0)
         elif update_mode == 2:
             # modified MacKay fixed point update (11) in [1]
-            numer = gammas * np.sqrt(np.mean(np.abs(A) ** 2, axis=1))
+            numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1))
             denom = np.sum(G * CMinvG, axis=0)  # sqrt is applied below
         else:
             raise ValueError('Invalid value for update_mode')
@@ -138,8 +141,8 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
         gammas_full = np.zeros(n_sources, dtype=np.float)
         gammas_full[active_set] = gammas
 
-        err = (np.sum(np.abs(gammas_full - gammas_full_old))
-               / np.sum(np.abs(gammas_full_old)))
+        err = (np.sum(np.abs(gammas_full - gammas_full_old)) /
+               np.sum(np.abs(gammas_full_old)))
 
         gammas_full_old = gammas_full
 
@@ -167,7 +170,8 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1,
 @verbose
 def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
               xyz_same_gamma=True, maxit=10000, tol=1e-6, update_mode=1,
-              gammas=None, pca=True, return_residual=False, verbose=None):
+              gammas=None, pca=True, return_residual=False,
+              verbose=None):
     """Hierarchical Bayes (Gamma-MAP) sparse source localization method
 
     Models each source time course using a zero-mean Gaussian prior with an
@@ -231,6 +235,8 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     Wipf et al. A unified Bayesian framework for MEG/EEG source imaging,
     NeuroImage, vol. 44, no. 3, pp. 947-66, Mar. 2009.
     """
+    _check_reference(evoked)
+
     # make forward solution in fixed orientation if necessary
     if loose is None and not is_fixed_orient(forward):
         forward = deepcopy(forward)
@@ -241,18 +247,15 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     else:
         group_size = 3
 
-    gain_info, gain, _, whitener, _ = _prepare_forward(forward, evoked.info,
-                                                       noise_cov, pca)
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked.info, noise_cov, pca, depth, loose, None, None)
 
     # get the data
     sel = [evoked.ch_names.index(name) for name in gain_info['ch_names']]
     M = evoked.data[sel]
 
-    # whiten and prepare gain matrix
-    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
-                                                 depth, loose, None,
-                                                 None)
     # whiten the data
+    logger.info('Whitening data matrix.')
     M = np.dot(whitener, M)
 
     # run the optimization
@@ -263,17 +266,14 @@ def gamma_map(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     if len(active_set) == 0:
         raise Exception("No active dipoles found. alpha is too big.")
 
-    # reapply weights to have correct unit
-    X /= source_weighting[active_set][:, None]
+    # Reapply weights to have correct unit
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    X = _reapply_source_weighting(X, source_weighting,
+                                  active_set, n_dip_per_pos)
 
     if return_residual:
-        sel = [forward['sol']['row_names'].index(c)
-               for c in gain_info['ch_names']]
-        residual = evoked.copy()
-        residual = pick_channels_evoked(residual,
-                                        include=gain_info['ch_names'])
-        residual.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
-                                X)
+        residual = _compute_residual(forward, evoked, X, active_set,
+                                     gain_info)
 
     if group_size == 1 and not is_fixed_orient(forward):
         # make sure each source has 3 components
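
The public signature of gamma_map is unchanged by this refactoring; it
now shares _prepare_gain and _compute_residual with the MxNE code path.
A usage sketch (the file names are placeholders, not files shipped with
this commit; alpha is arbitrary):

    import mne
    from mne.inverse_sparse import gamma_map

    evoked = mne.read_evokeds('sub-ave.fif', condition=0)  # placeholder
    fwd = mne.read_forward_solution('sub-fwd.fif')         # placeholder
    cov = mne.read_cov('sub-cov.fif')                      # placeholder

    # larger alpha -> stronger prior -> sparser source estimate
    stc, residual = gamma_map(evoked, fwd, cov, alpha=0.5,
                              return_residual=True)
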
diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py
index 7a4f66f..e3c0b89 100755
--- a/mne/inverse_sparse/mxne_debiasing.py
+++ b/mne/inverse_sparse/mxne_debiasing.py
@@ -121,9 +121,15 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None):
         Y.fill(0.0)
         dt = (t0 - 1.0) / t
         Y = D + dt * (D - D0)
-        if linalg.norm(D - D0, np.inf) < tol:
-            logger.info("Debiasing converged after %d iterations" % i)
+
+        Ddiff = linalg.norm(D - D0, np.inf)
+
+        if Ddiff < tol:
+            logger.info("Debiasing converged after %d iterations "
+                        "max(|D - D0| = %e < %e)" % (i, Ddiff, tol))
             break
     else:
-        logger.info("Debiasing did not converge")
+        Ddiff = linalg.norm(D - D0, np.inf)
+        logger.info("Debiasing did not converge after %d iterations! "
+                    "max(|D - D0| = %e >= %e)" % (max_iter, Ddiff, tol))
     return D
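
The reworked messages rely on Python's for/else: the else branch runs
only when the loop finishes without hitting break, i.e. when the
tolerance was never reached. The skeleton of that control flow
(converged is a stand-in predicate):

    max_iter = 1000

    def converged(i):  # stand-in for the |D - D0| < tol check
        return i == 10

    for i in range(max_iter):
        if converged(i):
            print('converged after %d iterations' % i)
            break
    else:
        print('did not converge after %d iterations' % max_iter)
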
diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py
index 7223096..72e5c75 100644
--- a/mne/inverse_sparse/mxne_inverse.py
+++ b/mne/inverse_sparse/mxne_inverse.py
@@ -1,4 +1,5 @@
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at gmail.com>
 #
 # License: Simplified BSD
 
@@ -8,56 +9,128 @@ from scipy import linalg, signal
 
 from ..source_estimate import SourceEstimate
 from ..minimum_norm.inverse import combine_xyz, _prepare_forward
+from ..minimum_norm.inverse import _check_reference
 from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
 from ..io.pick import pick_channels_evoked
-from .mxne_optim import mixed_norm_solver, norm_l2inf, tf_mixed_norm_solver
+from ..io.proj import deactivate_proj
 from ..utils import logger, verbose
+from ..externals.six.moves import xrange as range
+
+from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver,
+                         norm_l2inf, tf_mixed_norm_solver)
 
 
 @verbose
-def _prepare_gain(gain, forward, whitener, depth, loose, weights, weights_min,
-                  verbose=None):
+def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
+    mask = None
+    if isinstance(weights, SourceEstimate):
+        # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
+        weights = np.max(np.abs(weights.data), axis=1)
+    weights_max = np.max(weights)
+    if weights_min > weights_max:
+        raise ValueError('weights_min > weights_max (%s > %s)' %
+                         (weights_min, weights_max))
+    weights_min = weights_min / weights_max
+    weights = weights / weights_max
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
+    weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
+    if len(weights) != gain.shape[1]:
+        raise ValueError('weights do not have the correct dimension '
+                         '(%d != %d)' % (len(weights), gain.shape[1]))
+    if len(source_weighting.shape) == 1:
+        source_weighting *= weights
+    else:
+        source_weighting *= weights[:, None]
+    gain *= weights[None, :]
+
+    if weights_min is not None:
+        mask = (weights > weights_min)
+        gain = gain[:, mask]
+        n_sources = np.sum(mask) // n_dip_per_pos
+        logger.info("Reducing source space to %d sources" % n_sources)
+
+    return gain, source_weighting, mask
+
+
+@verbose
+def _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,
+                         weights_min, verbose=None):
+    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
+                                                       noise_cov, pca)
+
     logger.info('Whitening lead field matrix.')
     gain = np.dot(whitener, gain)
 
-    # Handle depth prior scaling
-    source_weighting = np.sum(gain ** 2, axis=0) ** depth
+    if depth is not None:
+        depth_prior = np.sum(gain ** 2, axis=0) ** depth
+        source_weighting = np.sqrt(depth_prior ** -1.)
+    else:
+        source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)
 
-    # apply loose orientations
-    orient_prior = compute_orient_prior(forward, loose)
+    if loose is not None and loose != 1.0:
+        source_weighting *= np.sqrt(compute_orient_prior(forward, loose))
 
-    source_weighting /= orient_prior
-    source_weighting = np.sqrt(source_weighting)
-    gain /= source_weighting[None, :]
+    gain *= source_weighting[None, :]
 
-    # Handle weights
-    mask = None
-    if weights is not None:
-        if isinstance(weights, SourceEstimate):
-            # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
-            weights = np.max(np.abs(weights.data), axis=1)
-        weights_max = np.max(weights)
-        if weights_min > weights_max:
-            raise ValueError('weights_min > weights_max (%s > %s)' %
-                             (weights_min, weights_max))
-        weights_min = weights_min / weights_max
-        weights = weights / weights_max
-        n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
-        weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
-        if len(weights) != gain.shape[1]:
-            raise ValueError('weights do not have the correct dimension '
-                             ' (%d != %d)' % (len(weights), gain.shape[1]))
-        nz_idx = np.where(weights != 0.0)[0]
-        source_weighting[nz_idx] /= weights[nz_idx]
-        gain *= weights[None, :]
-
-        if weights_min is not None:
-            mask = (weights > weights_min)
-            gain = gain[:, mask]
-            n_sources = np.sum(mask) / n_dip_per_pos
-            logger.info("Reducing source space to %d sources" % n_sources)
+    if weights is None:
+        mask = None
+    else:
+        gain, source_weighting, mask = _prepare_weights(forward, gain,
+                                                        source_weighting,
+                                                        weights, weights_min)
 
-    return gain, source_weighting, mask
+    return gain, gain_info, whitener, source_weighting, mask
+
+
+def _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,
+                  weights_min, verbose=None):
+    if not isinstance(depth, float):
+        raise ValueError('Invalid depth parameter. '
+                         'A float is required (got %s).'
+                         % type(depth))
+    elif depth < 0.0:
+        raise ValueError('Depth parameter must be positive (got %s).'
+                         % depth)
+
+    gain, gain_info, whitener, source_weighting, mask = \
+        _prepare_gain_column(forward, info, noise_cov, pca, depth,
+                             loose, weights, weights_min)
+
+    return gain, gain_info, whitener, source_weighting, mask
+
+
+def _reapply_source_weighting(X, source_weighting, active_set,
+                              n_dip_per_pos):
+    X *= source_weighting[active_set][:, None]
+    return X
+
+
+def _compute_residual(forward, evoked, X, active_set, info):
+    # OK, picking based on row_names is safe
+    sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
+    residual = evoked.copy()
+    residual = pick_channels_evoked(residual, include=info['ch_names'])
+    r_tmp = residual.copy()
+
+    r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
+
+    # Take care of proj
+    active_projs = list()
+    non_active_projs = list()
+    for p in evoked.info['projs']:
+        if p['active']:
+            active_projs.append(p)
+        else:
+            non_active_projs.append(p)
+
+    if len(active_projs) > 0:
+        r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
+        r_tmp.apply_proj()
+        r_tmp.add_proj(non_active_projs, remove_existing=False)
+
+    residual.data -= r_tmp.data
+
+    return residual
 
 
 @verbose
@@ -80,8 +153,8 @@ def _make_sparse_stc(X, active_set, forward, tmin, tstep,
 
     n_lh_points = len(src[0]['vertno'])
     lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]
-    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points]
-                                             - n_lh_points]
+    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points] -
+                                 n_lh_points]
     vertices = [lh_vertno, rh_vertno]
     stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
     return stc
@@ -91,17 +164,24 @@ def _make_sparse_stc(X, active_set, forward, tmin, tstep,
 def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
                maxit=3000, tol=1e-4, active_set_size=10, pca=True,
                debias=True, time_pca=True, weights=None, weights_min=None,
-               solver='auto', return_residual=False, verbose=None):
-    """Mixed-norm estimate (MxNE)
+               solver='auto', n_mxne_iter=1, return_residual=False,
+               verbose=None):
+    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE)
 
-    Compute L1/L2 mixed-norm solution on evoked data.
+    Compute L1/L2 mixed-norm solution or L0.5/L2 mixed-norm solution
+    on evoked data.
 
     References:
-    Gramfort A., Kowalski M. and Hamalainen, M,
+    Gramfort A., Kowalski M. and Hamalainen, M.,
     Mixed-norm estimates for the M/EEG inverse problem using accelerated
     gradient methods, Physics in Medicine and Biology, 2012
     http://dx.doi.org/10.1088/0031-9155/57/7/1937
 
+    Strohmeier D., Haueisen J., and Gramfort A.,
+    Improved MEG/EEG source localization with reweighted mixed-norms,
+    4th International Workshop on Pattern Recognition in Neuroimaging,
+    Tuebingen, 2014
+
     Parameters
     ----------
     evoked : instance of Evoked or list of instances of Evoked
@@ -139,14 +219,18 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     weights_min : float
         Do not consider in the estimation sources for which weights
         is less than weights_min.
-    solver : 'prox' | 'cd' | 'auto'
-        The algorithm to use for the optimization. prox stands for
-        proximal interations using the FISTA algorithm while cd uses
-        coordinate descent. cd is only available for fixed orientation.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization. 'prox' stands for
+    proximal iterations using the FISTA algorithm, 'cd' uses
+        coordinate descent, and 'bcd' applies block coordinate descent.
+        'cd' is only available for fixed orientation.
+    n_mxne_iter : int
+        The number of MxNE iterations. If > 1, iterative reweighting
+        is applied.
     return_residual : bool
         If True, the residual is returned as an Evoked instance.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -155,13 +239,24 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     residual : instance of Evoked
         The residual a.k.a. data not explained by the sources.
         Only returned if return_residual is True.
+
+    See Also
+    --------
+    tf_mixed_norm
     """
+    if n_mxne_iter < 1:
+        raise ValueError('MxNE has to be computed at least once. '
+                         'Requires n_mxne_iter >= 1. '
+                         'Got n_mxne_iter = %d.' % n_mxne_iter)
+
     if not isinstance(evoked, list):
         evoked = [evoked]
 
+    _check_reference(evoked[0])
+
     all_ch_names = evoked[0].ch_names
     if not all(all_ch_names == evoked[i].ch_names
-                                            for i in range(1, len(evoked))):
+               for i in range(1, len(evoked))):
         raise Exception('All the datasets must have the same good channels.')
 
     # put the forward solution in fixed orientation if it's not already
@@ -169,14 +264,9 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
         forward = deepcopy(forward)
         _to_fixed_ori(forward)
 
-    info = evoked[0].info
-    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
-                                                       noise_cov, pca)
-
-    # Whiten lead field.
-    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
-                                                 depth, loose, weights,
-                                                 weights_min)
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked[0].info, noise_cov, pca, depth, loose, weights,
+        weights_min)
 
     sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
     M = np.concatenate([e.data[sel] for e in evoked], axis=1)
@@ -198,14 +288,18 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
     alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
     alpha_max *= 0.01
     gain /= alpha_max
-    source_weighting *= alpha_max
+    source_weighting /= alpha_max
 
-    X, active_set, E = mixed_norm_solver(M, gain, alpha,
-                                         maxit=maxit, tol=tol,
-                                         active_set_size=active_set_size,
-                                         debias=debias,
-                                         n_orient=n_dip_per_pos,
-                                         solver=solver)
+    if n_mxne_iter == 1:
+        X, active_set, E = mixed_norm_solver(
+            M, gain, alpha, maxit=maxit, tol=tol,
+            active_set_size=active_set_size, n_orient=n_dip_per_pos,
+            debias=debias, solver=solver, verbose=verbose)
+    else:
+        X, active_set, E = iterative_mixed_norm_solver(
+            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
+            n_orient=n_dip_per_pos, active_set_size=active_set_size,
+            debias=debias, solver=solver, verbose=verbose)
 
     if mask is not None:
         active_set_tmp = np.zeros(len(mask), dtype=np.bool)
@@ -220,7 +314,8 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
         raise Exception("No active dipoles found. alpha is too big.")
 
     # Reapply weights to have correct unit
-    X /= source_weighting[active_set][:, None]
+    X = _reapply_source_weighting(X, source_weighting,
+                                  active_set, n_dip_per_pos)
 
     stcs = list()
     residual = list()
@@ -234,12 +329,8 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
         cnt += len(e.times)
 
         if return_residual:
-            sel = [forward['sol']['row_names'].index(c)
-                                                for c in gain_info['ch_names']]
-            r = deepcopy(e)
-            r = pick_channels_evoked(r, include=gain_info['ch_names'])
-            r.data -= np.dot(forward['sol']['data'][sel, :][:, active_set], Xe)
-            residual.append(r)
+            residual.append(_compute_residual(forward, e, Xe, active_set,
+                            gain_info))
 
     logger.info('[done]')
 
@@ -262,7 +353,7 @@ def _window_evoked(evoked, size):
         lsize = rsize = float(size)
     else:
         lsize, rsize = size
-    evoked = deepcopy(evoked)
+    evoked = evoked.copy()
     sfreq = float(evoked.info['sfreq'])
     lsize = int(lsize * sfreq)
     rsize = int(rsize * sfreq)
@@ -279,8 +370,8 @@ def _window_evoked(evoked, size):
 def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
                   loose=0.2, depth=0.8, maxit=3000, tol=1e-4,
                   weights=None, weights_min=None, pca=True, debias=True,
-                  wsize=64, tstep=4, window=0.02,
-                  return_residual=False, verbose=None):
+                  wsize=64, tstep=4, window=0.02, return_residual=False,
+                  verbose=None):
     """Time-Frequency Mixed-norm estimate (TF-MxNE)
 
     Compute L1/L2 + L1 mixed-norm solution on time frequency
@@ -310,10 +401,10 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
         Forward operator.
     noise_cov : instance of Covariance
         Noise covariance to compute whitener.
-    alpha_space : float
+    alpha_space : float in [0, 100]
         Regularization parameter for spatial sparsity. If larger than 100,
         then no source will be active.
-    alpha_time : float
+    alpha_time : float in [0, 100]
         Regularization parameter for temporal sparsity. It set to 0,
         no temporal regularization is applied. It this case, TF-MxNE is
         equivalent to MxNE with L21 norm.
@@ -337,6 +428,8 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
         is less than weights_min.
     pca: bool
         If True the rank of the data is reduced to true dimension.
+    debias: bool
+        Remove coefficient amplitude bias due to L1 penalty.
     wsize: int
         Length of the STFT window in samples (must be a multiple of 4).
     tstep: int
@@ -346,12 +439,10 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
         Length of time window used to take care of edge artifacts in seconds.
         It can be a single float, or a pair of floats if the values
         differ for the left and right window lengths.
-    debias: bool
-        Remove coefficient amplitude bias due to L1 penalty.
     return_residual : bool
         If True, the residual is returned as an Evoked instance.
-    verbose: bool
-        Verbose output or not.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -360,21 +451,34 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
     residual : instance of Evoked
         The residual a.k.a. data not explained by the sources.
         Only returned if return_residual is True.
+
+    See Also
+    --------
+    mixed_norm
     """
+    _check_reference(evoked)
+
     all_ch_names = evoked.ch_names
     info = evoked.info
 
+    if (alpha_space < 0.) or (alpha_space > 100.):
+        raise Exception('alpha_space must be in range [0, 100].'
+                        ' Got alpha_space = %f' % alpha_space)
+
+    if (alpha_time < 0.) or (alpha_time > 100.):
+        raise Exception('alpha_time must be in range [0, 100].'
+                        ' Got alpha_time = %f' % alpha_time)
+
     # put the forward solution in fixed orientation if it's not already
     if loose is None and not is_fixed_orient(forward):
         forward = deepcopy(forward)
         _to_fixed_ori(forward)
 
-    gain_info, gain, _, whitener, _ = _prepare_forward(forward,
-                                                      info, noise_cov, pca)
+    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
 
-    # Whiten lead field.
-    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
-                                        depth, loose, weights, weights_min)
+    gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
+        forward, evoked.info, noise_cov, pca, depth, loose, weights,
+        weights_min)
 
     if window is not None:
         evoked = _window_evoked(evoked, window)
@@ -387,22 +491,19 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
     M = np.dot(whitener, M)
 
     # Scaling to make setting of alpha easy
-    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
     alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
     alpha_max *= 0.01
     gain /= alpha_max
-    source_weighting *= alpha_max
+    source_weighting /= alpha_max
 
-    X, active_set, E = tf_mixed_norm_solver(M, gain,
-                                            alpha_space, alpha_time,
-                                            wsize=wsize, tstep=tstep,
-                                            maxit=maxit, tol=tol,
-                                            verbose=verbose,
-                                            n_orient=n_dip_per_pos,
-                                            debias=debias)
+    X, active_set, E = tf_mixed_norm_solver(
+        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
+        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
+        log_objective=False, debias=debias)
 
     if active_set.sum() == 0:
-        raise Exception("No active dipoles found. alpha is too big.")
+        raise Exception("No active dipoles found. "
+                        "alpha_space/alpha_time are too big.")
 
     if mask is not None:
         active_set_tmp = np.zeros(len(mask), dtype=np.bool)
@@ -410,23 +511,21 @@ def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
         active_set = active_set_tmp
         del active_set_tmp
 
-    # Reapply weights to have correct unit
-    X /= source_weighting[active_set][:, None]
+    X = _reapply_source_weighting(
+        X, source_weighting, active_set, n_dip_per_pos)
 
     if return_residual:
-        sel = [forward['sol']['row_names'].index(c)
-                                            for c in gain_info['ch_names']]
-        residual = deepcopy(evoked)
-        residual = pick_channels_evoked(residual, include=gain_info['ch_names'])
-        residual.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
-                                X)
-
-    tmin = evoked.times[0]
-    tstep = 1.0 / info['sfreq']
-    out = _make_sparse_stc(X, active_set, forward, tmin, tstep)
+        residual = _compute_residual(
+            forward, evoked, X, active_set, gain_info)
+
+    stc = _make_sparse_stc(
+        X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])
+
     logger.info('[done]')
 
     if return_residual:
-        out = out, residual
+        out = stc, residual
+    else:
+        out = stc
 
     return out
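
At the call site, the new options surface as follows (a sketch assuming
evoked, fwd and cov are loaded as in the gamma_map sketch earlier; the
alpha values are arbitrary):

    from mne.inverse_sparse import mixed_norm, tf_mixed_norm

    # n_mxne_iter > 1 turns MxNE into irMxNE via iterative reweighting
    stc = mixed_norm(evoked, fwd, cov, alpha=40., n_mxne_iter=5,
                     solver='bcd')

    # alpha_space and alpha_time must now lie in [0, 100]
    stc_tf, resid = tf_mixed_norm(evoked, fwd, cov, alpha_space=30.,
                                  alpha_time=1., return_residual=True)
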
diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py
index 6020d41..c3f929e 100644
--- a/mne/inverse_sparse/mxne_optim.py
+++ b/mne/inverse_sparse/mxne_optim.py
@@ -1,8 +1,10 @@
 from __future__ import print_function
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at gmail.com>
 #
 # License: Simplified BSD
 
+from copy import deepcopy
 import warnings
 from math import sqrt, ceil
 import numpy as np
@@ -11,6 +13,7 @@ from scipy import linalg
 from .mxne_debiasing import compute_bias
 from ..utils import logger, verbose, sum_squared
 from ..time_frequency.stft import stft_norm2, stft, istft
+from ..externals.six.moves import xrange as range
 
 
 def groups_norm2(A, n_orient):
@@ -71,8 +74,8 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
     if is_stft:
         rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
     else:
-        rows_norm = np.sqrt(np.sum((np.abs(Y) ** 2).reshape(n_positions, -1),
-                                   axis=1))
+        rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
+                                                        -1).sum(axis=1))
     # Ensure shrink is >= 0 while avoiding any division by zero
     shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
     active_set = shrink > 0.0
@@ -110,7 +113,7 @@ def prox_l1(Y, alpha, n_orient):
     [ True  True False False]
     """
     n_positions = Y.shape[0] // n_orient
-    norms = np.sqrt(np.sum((np.abs(Y) ** 2).T.reshape(-1, n_orient), axis=1))
+    norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
     # Ensure shrink is >= 0 while avoiding any division by zero
     shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
     shrink = shrink.reshape(-1, n_positions).T
@@ -136,11 +139,11 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient):
 
     Parameters
     ----------
-    M : array of shape [n_sensors, n_times]
-        data
-    G : array of shape [n_sensors, n_active]
-        Gain matrix a.k.a. lead field
-    X : array of shape [n_active, n_times]
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_active)
+        The gain matrix a.k.a. lead field.
+    X : array, shape (n_active, n_times)
         Sources
     active_set : array of bool
         Mask of active sources
@@ -157,7 +160,7 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient):
         Primal cost
     dobj : float
         Dual cost. gap = pobj - dobj
-    R : array of shape [n_sensors, n_times]
+    R : array, shape (n_sensors, n_times)
         Current residual of M - G * X
     """
     GX = np.dot(G[:, active_set], X)
@@ -174,14 +177,12 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient):
 
 
 @verbose
-def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
-                            init=None, n_orient=1):
+def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
+                            tol=1e-8, verbose=None, init=None, n_orient=1):
     """Solves L21 inverse problem with proximal iterations and FISTA"""
     n_sensors, n_times = M.shape
     n_sensors, n_sources = G.shape
 
-    lipschitz_constant = 1.1 * linalg.norm(G, ord=2) ** 2
-
     if n_sources < n_sensors:
         gram = np.dot(G.T, G)
         GTM = np.dot(G.T, M)
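
The Lipschitz constant becomes an argument because the right value
depends on the solver: FISTA needs one global bound, the new BCD solver
one bound per source block. The global bound always dominates the
per-column bounds, which is why BCD can take larger steps on each block
(a quick check for the n_orient=1 case):

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    G = rng.randn(20, 30)
    lc_global = linalg.norm(G, ord=2) ** 2  # FISTA: ||G||_2 ** 2
    lc_blocks = np.sum(G * G, axis=0)       # BCD: per-column ||g_j|| ** 2
    assert np.all(lc_blocks <= lc_global)
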
@@ -237,8 +238,8 @@ def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
 
 
 @verbose
-def _mixed_norm_solver_cd(M, G, alpha, maxit=10000, tol=1e-8,
-                          verbose=None, init=None, n_orient=1):
+def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
+                          tol=1e-8, verbose=None, init=None, n_orient=1):
     """Solves L21 inverse problem with coordinate descent"""
     from sklearn.linear_model.coordinate_descent import MultiTaskLasso
 
@@ -262,10 +263,74 @@ def _mixed_norm_solver_cd(M, G, alpha, maxit=10000, tol=1e-8,
 
 
 @verbose
+def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
+                           tol=1e-8, verbose=None, init=None, n_orient=1):
+    """Solves L21 inverse problem with block coordinate descent"""
+    # First make G fortran for faster access to blocks of columns
+    G = np.asfortranarray(G)
+
+    n_sensors, n_times = M.shape
+    n_sensors, n_sources = G.shape
+    n_positions = n_sources // n_orient
+
+    if init is None:
+        X = np.zeros((n_sources, n_times))
+        R = M.copy()
+    else:
+        X = init
+        R = M - np.dot(G, X)
+
+    E = []  # track cost function
+
+    active_set = np.zeros(n_sources, dtype=np.bool)  # start with full AS
+
+    alpha_lc = alpha / lipschitz_constant
+
+    for i in range(maxit):
+        for j in range(n_positions):
+            idx = slice(j * n_orient, (j + 1) * n_orient)
+
+            G_j = G[:, idx]
+            X_j = X[idx]
+
+            X_j_new = np.dot(G_j.T, R) / lipschitz_constant[j]
+
+            was_non_zero = np.any(X_j)
+            if was_non_zero:
+                R += np.dot(G_j, X_j)
+                X_j_new += X_j
+
+            block_norm = linalg.norm(X_j_new, 'fro')
+            if block_norm <= alpha_lc[j]:
+                X_j.fill(0.)
+                active_set[idx] = False
+            else:
+                shrink = np.maximum(1.0 - alpha_lc[j] / block_norm, 0.0)
+                X_j_new *= shrink
+                R -= np.dot(G_j, X_j_new)
+                X_j[:] = X_j_new
+                active_set[idx] = True
+
+        gap, pobj, dobj, _ = dgap_l21(M, G, X[active_set], active_set, alpha,
+                                      n_orient)
+        E.append(pobj)
+        logger.debug("Iteration %d :: pobj %f :: dgap %f :: n_active %d" % (
+                     i + 1, pobj, gap, np.sum(active_set) // n_orient))
+
+        if gap < tol:
+            logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
+            break
+
+    X = X[active_set]
+
+    return X, active_set, E
+
+
+@verbose
 def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
                       active_set_size=50, debias=True, n_orient=1,
                       solver='auto'):
-    """Solves L21 inverse solver with active set strategy
+    """Solves L1/L2 mixed-norm inverse problem with active set strategy
 
     Algorithm is detailed in:
     Gramfort A., Kowalski M. and Hamalainen, M,
@@ -275,31 +340,31 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
 
     Parameters
     ----------
-    M : array
-        The data
-    G : array
-        The forward operator
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
     alpha : float
         The regularization parameter. It should be between 0 and 100.
         A value of 100 will lead to an empty active set (no active source).
     maxit : int
-        The number of iterations
+        The number of iterations.
     tol : float
-        Tolerance on dual gap for convergence checking
+        Tolerance on dual gap for convergence checking.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     active_set_size : int
         Size of active set increase at each iteration.
     debias : bool
-        Debias source estimates
+        Debias source estimates.
     n_orient : int
         The number of orientations (1 : fixed or 3 : free or loose).
-    solver : 'prox' | 'cd' | 'auto'
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
         The algorithm to use for the optimization.
 
     Returns
     -------
-    X : array
+    X : array, shape (n_active, n_times)
         The source estimates.
     active_set : array
         The mask of active sources.
@@ -308,13 +373,14 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
     """
     n_dipoles = G.shape[1]
     n_positions = n_dipoles // n_orient
+    n_sensors, n_times = M.shape
     alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
     logger.info("-- ALPHA MAX : %s" % alpha_max)
     alpha = float(alpha)
 
     has_sklearn = True
     try:
-        from sklearn.linear_model.coordinate_descent import MultiTaskLasso
+        from sklearn.linear_model.coordinate_descent import MultiTaskLasso  # noqa
     except ImportError:
         has_sklearn = False
 
@@ -322,76 +388,213 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
         if has_sklearn and (n_orient == 1):
             solver = 'cd'
         else:
-            solver = 'prox'
+            solver = 'bcd'
 
     if solver == 'cd':
         if n_orient == 1 and not has_sklearn:
             warnings.warn("Scikit-learn >= 0.12 cannot be found. "
-                          "Using proximal iterations instead of coordinate "
-                          "descent.")
-            solver = 'prox'
+                          "Using block coordinate descent instead of "
+                          "coordinate descent.")
+            solver = 'bcd'
         if n_orient > 1:
             warnings.warn("Coordinate descent is only available for fixed "
-                          "orientation. Using proximal iterations instead of "
-                          "coordinate descent")
-            solver = 'prox'
+                          "orientation. Using block coordinate descent "
+                          "instead of coordinate descent")
+            solver = 'bcd'
 
     if solver == 'cd':
         logger.info("Using coordinate descent")
         l21_solver = _mixed_norm_solver_cd
+        lc = None
+    elif solver == 'bcd':
+        logger.info("Using block coordinate descent")
+        l21_solver = _mixed_norm_solver_bcd
+        G = np.asfortranarray(G)
+        if n_orient == 1:
+            lc = np.sum(G * G, axis=0)
+        else:
+            lc = np.empty(n_positions)
+            for j in range(n_positions):
+                G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+                lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
     else:
         logger.info("Using proximal iterations")
         l21_solver = _mixed_norm_solver_prox
+        lc = 1.01 * linalg.norm(G, ord=2) ** 2
 
     if active_set_size is not None:
+        E = list()
         X_init = None
-        n_sensors, n_times = M.shape
+        active_set = np.zeros(n_dipoles, dtype=np.bool)
         idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
-        active_set = np.zeros(n_positions, dtype=np.bool)
-        active_set[idx_large_corr[-active_set_size:]] = True
+        new_active_idx = idx_large_corr[-active_set_size:]
         if n_orient > 1:
-            active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
+            new_active_idx = (n_orient * new_active_idx[:, None] +
+                              np.arange(n_orient)[None, :]).ravel()
+        active_set[new_active_idx] = True
+        as_size = np.sum(active_set)
         for k in range(maxit):
-            X, as_, E = l21_solver(M, G[:, active_set], alpha,
+            if solver == 'bcd':
+                lc_tmp = lc[active_set[::n_orient]]
+            elif solver == 'cd':
+                lc_tmp = None
+            else:
+                lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
+            X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
                                    maxit=maxit, tol=tol, init=X_init,
                                    n_orient=n_orient)
-            as_ = np.where(active_set)[0][as_]
-            gap, pobj, dobj, R = dgap_l21(M, G, X, as_, alpha, n_orient)
-            logger.info('gap = %s, pobj = %s' % (gap, pobj))
+            active_set[active_set] = as_.copy()
+            idx_old_active_set = np.where(active_set)[0]
+
+            gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha,
+                                          n_orient)
+            E.append(pobj)
+            logger.info("Iteration %d :: pobj %f :: dgap %f ::"
+                        "n_active_start %d :: n_active_end %d" % (
+                            k + 1, pobj, gap, as_size // n_orient,
+                            np.sum(active_set) // n_orient))
             if gap < tol:
                 logger.info('Convergence reached ! (gap: %s < %s)'
                             % (gap, tol))
                 break
-            else:  # add sources
+
+            # add sources if not last iteration
+            if k < (maxit - 1):
                 idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
-                                                         n_orient))
+                                            n_orient))
                 new_active_idx = idx_large_corr[-active_set_size:]
                 if n_orient > 1:
                     new_active_idx = (n_orient * new_active_idx[:, None] +
                                       np.arange(n_orient)[None, :])
                     new_active_idx = new_active_idx.ravel()
-                idx_old_active_set = as_
-                active_set_old = active_set.copy()
                 active_set[new_active_idx] = True
+                idx_active_set = np.where(active_set)[0]
                 as_size = np.sum(active_set)
-                logger.info('active set size %s' % as_size)
                 X_init = np.zeros((as_size, n_times), dtype=X.dtype)
-                idx_active_set = np.where(active_set)[0]
                 idx = np.searchsorted(idx_active_set, idx_old_active_set)
                 X_init[idx] = X
-                if np.all(active_set_old == active_set):
-                    logger.info('Convergence stopped (AS did not change) !')
-                    break
         else:
             logger.warning('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
-
-        active_set = np.zeros_like(active_set)
-        active_set[as_] = True
     else:
-        X, active_set, E = l21_solver(M, G, alpha, maxit=maxit,
-                                      tol=tol, n_orient=n_orient)
+        X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
+                                      tol=tol, n_orient=n_orient, init=None)
 
-    if (active_set.sum() > 0) and debias:
+    if np.any(active_set) and debias:
+        bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
+        X *= bias[:, np.newaxis]
+
+    logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
+
+    return X, active_set, E
+
+
+@verbose
+def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
+                                tol=1e-8, verbose=None, active_set_size=50,
+                                debias=True, n_orient=1, solver='auto'):
+    """Solves L0.5/L2 mixed-norm inverse problem with active set strategy
+
+    Algorithm is detailed in:
+
+    Strohmeier D., Haueisen J., and Gramfort A.:
+    Improved MEG/EEG source localization with reweighted mixed-norms,
+    4th International Workshop on Pattern Recognition in Neuroimaging,
+    Tuebingen, 2014
+
+    Parameters
+    ----------
+    M : array, shape (n_sensors, n_times)
+        The data.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
+    alpha : float
+        The regularization parameter. It should be between 0 and 100.
+        A value of 100 will lead to an empty active set (no active source).
+    n_mxne_iter : int
+        The number of MxNE iterations. If > 1, iterative reweighting
+        is applied.
+    maxit : int
+        The number of iterations.
+    tol : float
+        Tolerance on dual gap for convergence checking.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    active_set_size : int
+        Size of active set increase at each iteration.
+    debias : bool
+        Debias source estimates.
+    n_orient : int
+        The number of orientations (1 : fixed or 3 : free or loose).
+    solver : 'prox' | 'cd' | 'bcd' | 'auto'
+        The algorithm to use for the optimization.
+
+    Returns
+    -------
+    X : array, shape (n_active, n_times)
+        The source estimates.
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function over the iterations.
+    """
+    def g(w):
+        return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
+
+    def gprime(w):
+        return 2. * np.repeat(g(w), n_orient).ravel()
+
+    E = list()
+
+    active_set = np.ones(G.shape[1], dtype=np.bool)
+    weights = np.ones(G.shape[1])
+    X = np.zeros((G.shape[1], M.shape[1]))
+
+    for k in range(n_mxne_iter):
+        X0 = X.copy()
+        active_set_0 = active_set.copy()
+        G_tmp = G[:, active_set] * weights[np.newaxis, :]
+
+        if active_set_size is not None:
+            if np.sum(active_set) > (active_set_size * n_orient):
+                X, _active_set, _ = mixed_norm_solver(
+                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                    maxit=maxit, tol=tol, active_set_size=active_set_size,
+                    solver=solver, verbose=verbose)
+            else:
+                X, _active_set, _ = mixed_norm_solver(
+                    M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                    maxit=maxit, tol=tol, active_set_size=None, solver=solver,
+                    verbose=verbose)
+        else:
+            X, _active_set, _ = mixed_norm_solver(
+                M, G_tmp, alpha, debias=False, n_orient=n_orient,
+                maxit=maxit, tol=tol, active_set_size=None, solver=solver,
+                verbose=verbose)
+
+    logger.info('active set size %d' % (_active_set.sum() // n_orient))
+
+        if _active_set.sum() > 0:
+            active_set[active_set] = _active_set
+
+            # Reapply weights to have correct unit
+            X *= weights[_active_set][:, np.newaxis]
+            weights = gprime(X)
+            p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
+                                      'fro') ** 2. + alpha * np.sum(g(X))
+            E.append(p_obj)
+
+            # Check convergence
+            if ((k >= 1) and np.all(active_set == active_set_0) and
+                    np.all(np.abs(X - X0) < tol)):
+                logger.info('Convergence reached after %d reweightings!' % k)
+                break
+        else:
+            active_set = np.zeros_like(active_set)
+            p_obj = 0.5 * linalg.norm(M) ** 2.
+            E.append(p_obj)
+            break
+
+    if np.any(active_set) and debias:
         bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
         X *= bias[:, np.newaxis]
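
The new solver can be exercised directly on synthetic shapes; with
random data the recovered active set is meaningless, so this only
demonstrates the API (note that alpha here is an absolute value, unlike
the percent-style alpha of the public mixed_norm):

    import numpy as np
    from mne.inverse_sparse.mxne_optim import iterative_mixed_norm_solver

    rng = np.random.RandomState(0)
    G = rng.randn(20, 60)  # 20 sensors, 60 sources (n_orient=1)
    M = rng.randn(20, 10)  # 10 time samples
    X, active_set, E = iterative_mixed_norm_solver(
        M, G, alpha=50., n_mxne_iter=3, n_orient=1)
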
 
@@ -468,11 +671,292 @@ class _PhiT(object):
                      self.n_times)
 
 
+def norm_l21_tf(Z, shape, n_orient):
+    if Z.shape[0]:
+        Z2 = Z.reshape(*shape)
+        l21_norm = np.sqrt(stft_norm2(Z2).reshape(-1, n_orient).sum(axis=1))
+        l21_norm = l21_norm.sum()
+    else:
+        l21_norm = 0.
+    return l21_norm
+
+
+def norm_l1_tf(Z, shape, n_orient):
+    if Z.shape[0]:
+        n_positions = Z.shape[0] // n_orient
+        Z_ = np.sqrt(np.sum((np.abs(Z) ** 2.).reshape((n_orient, -1),
+                     order='F'), axis=0))
+        Z_ = Z_.reshape((n_positions, -1), order='F').reshape(*shape)
+        l1_norm = (2. * Z_.sum(axis=2).sum(axis=1) - np.sum(Z_[:, 0, :],
+                   axis=1) - np.sum(Z_[:, -1, :], axis=1))
+        l1_norm = l1_norm.sum()
+    else:
+        l1_norm = 0.
+    return l1_norm
+
+
+@verbose
+def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, alpha_space, alpha_time,
+                               lipschitz_constant, phi, phiT,
+                               wsize=64, tstep=4, n_orient=1,
+                               maxit=200, tol=1e-8, log_objective=True,
+                               perc=None, verbose=None):
+    # First make G fortran for faster access to blocks of columns
+    G = np.asfortranarray(G)
+
+    n_sensors, n_times = M.shape
+    n_sources = G.shape[1]
+    n_positions = n_sources // n_orient
+
+    n_step = int(ceil(n_times / float(tstep)))
+    n_freq = wsize // 2 + 1
+    shape = (-1, n_freq, n_step)
+
+    G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
+    R = M.copy()  # residual
+    active = np.where(active_set)[0][::n_orient] // n_orient
+    for idx in active:
+        R -= np.dot(G[idx], phiT(Z[idx]))
+
+    E = []  # track cost function
+
+    alpha_time_lc = alpha_time / lipschitz_constant
+    alpha_space_lc = alpha_space / lipschitz_constant
+
+    converged = False
+
+    for i in range(maxit):
+        val_norm_l21_tf = 0.0
+        val_norm_l1_tf = 0.0
+        max_diff = 0.0
+        active_set_0 = active_set.copy()
+        for j in range(n_positions):
+            ids = j * n_orient
+            ide = ids + n_orient
+
+            G_j = G[j]
+            Z_j = Z[j]
+            active_set_j = active_set[ids:ide]
+
+            Z0 = deepcopy(Z_j)
+
+            was_active = np.any(active_set_j)
+
+            # gradient step
+            GTR = np.dot(G_j.T, R) / lipschitz_constant[j]
+            X_j_new = GTR.copy()
+
+            if was_active:
+                X_j = phiT(Z_j)
+                R += np.dot(G_j, X_j)
+                X_j_new += X_j
+
+            rows_norm = linalg.norm(X_j_new, 'fro')
+            if rows_norm <= alpha_space_lc[j]:
+                if was_active:
+                    Z[j] = 0.0
+                    active_set_j[:] = False
+            else:
+                if was_active:
+                    Z_j_new = Z_j + phi(GTR)
+                else:
+                    Z_j_new = phi(GTR)
+
+                col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
+
+                if np.all(col_norm <= alpha_time_lc[j]):
+                    Z[j] = 0.0
+                    active_set_j[:] = False
+                else:
+                    # l1
+                    shrink = np.maximum(1.0 - alpha_time_lc[j] / np.maximum(
+                                        col_norm, alpha_time_lc[j]), 0.0)
+                    Z_j_new *= shrink[np.newaxis, :]
+
+                    # l21
+                    shape_init = Z_j_new.shape
+                    Z_j_new = Z_j_new.reshape(*shape)
+                    row_norm = np.sqrt(stft_norm2(Z_j_new).sum())
+                    if row_norm <= alpha_space_lc[j]:
+                        Z[j] = 0.0
+                        active_set_j[:] = False
+                    else:
+                        shrink = np.maximum(1.0 - alpha_space_lc[j] /
+                                            np.maximum(row_norm,
+                                            alpha_space_lc[j]), 0.0)
+                        Z_j_new *= shrink
+                        Z[j] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
+                        active_set_j[:] = True
+                        R -= np.dot(G_j, phiT(Z[j]))
+
+                        if log_objective:
+                            val_norm_l21_tf += norm_l21_tf(
+                                Z[j], shape, n_orient)
+                            val_norm_l1_tf += norm_l1_tf(
+                                Z[j], shape, n_orient)
+
+            max_diff = np.maximum(max_diff, np.max(np.abs(Z[j] - Z0)))
+
+        if log_objective:  # log cost function value
+            pobj = (0.5 * (R ** 2.).sum() + alpha_space * val_norm_l21_tf +
+                    alpha_time * val_norm_l1_tf)
+            E.append(pobj)
+            logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
+                        pobj, np.sum(active_set) / n_orient))
+        else:
+            logger.info("Iteration %d" % (i + 1))
+
+        if perc is not None:
+            if np.sum(active_set) / float(n_orient) <= perc * n_positions:
+                break
+
+        if np.array_equal(active_set, active_set_0):
+            if max_diff < tol:
+                logger.info("Convergence reached !")
+                converged = True
+                break
+
+    return Z, active_set, E, converged
+
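
Each block update above is a gradient step on the residual followed by two
shrinkages: soft thresholding of individual TF coefficients against
alpha_time, then shrinkage of the whole block's norm against alpha_space,
zeroing the source when the norm falls below the threshold. The two operators
in miniature, for n_orient = 1; names are illustrative:

    import numpy as np

    def prox_l1(Z, t):
        # complex soft thresholding of individual coefficients
        # (cf. the alpha_time step above); phases are preserved
        shrink = np.maximum(1. - t / np.maximum(np.abs(Z), t), 0.)
        return Z * shrink

    def prox_l21_block(Z, t):
        # shrink a whole coefficient block toward zero (cf. the
        # alpha_space step); the block dies if its norm is below t
        norm = np.sqrt((np.abs(Z) ** 2).sum())
        if norm <= t:
            return np.zeros_like(Z)
        return Z * (1. - t / norm)

    Z = np.array([[1. + 1.j, 0.2], [0.1, 3.]])
    print(prox_l21_block(prox_l1(Z, 0.5), 1.0))
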
+
+@verbose
+def _tf_mixed_norm_solver_bcd_active_set(
+        M, G, alpha_space, alpha_time, lipschitz_constant, phi, phiT,
+        Z_init=None, wsize=64, tstep=4, n_orient=1, maxit=200, tol=1e-8,
+        log_objective=True, perc=None, verbose=None):
+    """Solves TF L21+L1 inverse solver with BCD and active set approach
+
+    Algorithm is detailed in:
+
+    Strohmeier D., Gramfort A., and Haueisen J.:
+    MEG/EEG source imaging with a non-convex penalty in the time-
+    frequency domain,
+    5th International Workshop on Pattern Recognition in Neuroimaging,
+    Stanford University, 2015
+
+    Parameters
+    ----------
+    M : array
+        The data.
+    G : array
+        The forward operator.
+    alpha_space : float in [0, 100]
+        Regularization parameter for spatial sparsity. If larger than 100,
+        then no source will be active.
+    alpha_time : float in [0, 100]
+        Regularization parameter for temporal sparsity. If set to 0,
+        no temporal regularization is applied. In this case, TF-MxNE is
+        equivalent to MxNE with L21 norm.
+    lipschitz_constant : array, shape (n_positions,)
+        The Lipschitz constants of the spatio-temporal linear operator,
+        one per source position.
+    phi : instance of _Phi
+        The TF operator.
+    phiT : instance of _PhiT
+        The transpose of the TF operator.
+    Z_init : None | array
+        The initialization of the TF coefficient matrix. If None, zeros
+        will be used for all coefficients.
+    wsize : int
+        Length of the STFT window in samples (must be a multiple of 4).
+    tstep : int
+        Step between successive windows in samples (must be a multiple of 2,
+        a divider of wsize, and smaller than wsize/2).
+    n_orient : int
+        The number of orientations (1: fixed, or 3: free or loose).
+    maxit : int
+        The maximum number of iterations.
+    tol : float
+        If the absolute difference between estimates at two successive
+        iterations is lower than tol, convergence is reached.
+    log_objective : bool
+        If True, the value of the minimized objective function is computed
+        and stored at every iteration.
+    perc : None | float in [0, 1]
+        The early stopping parameter used for BCD with active set approach.
+        If the active set size is smaller than perc * n_sources, the
+        subproblem limited to the active set is stopped. If None, full
+        convergence will be achieved.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    X : array
+        The source estimates.
+    Z : array
+        The TF coefficients of the source estimates (X = phiT(Z)).
+    active_set : array
+        The mask of active sources.
+    E : list
+        The value of the objective function at each iteration. If log_objective
+        is False, it will be empty.
+    """
+    n_sources = G.shape[1]
+    n_positions = n_sources // n_orient
+
+    if Z_init is None:
+        Z = dict.fromkeys(range(n_positions), 0.0)
+        active_set = np.zeros(n_sources, dtype=np.bool)
+    else:
+        active_set = np.zeros(n_sources, dtype=np.bool)
+        active = list()
+        for i in range(n_positions):
+            if np.any(Z_init[i * n_orient:(i + 1) * n_orient]):
+                active_set[i * n_orient:(i + 1) * n_orient] = True
+                active.append(i)
+        Z = dict.fromkeys(range(n_positions), 0.0)
+        if len(active):
+            Z.update(dict(zip(active, np.vsplit(Z_init[active_set],
+                     len(active)))))
+
+    Z, active_set, E, _ = _tf_mixed_norm_solver_bcd_(
+        M, G, Z, active_set, alpha_space, alpha_time, lipschitz_constant,
+        phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=1,
+        tol=tol, log_objective=log_objective, perc=None, verbose=verbose)
+
+    while active_set.sum():
+        active = np.where(active_set)[0][::n_orient] // n_orient
+        Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
+        Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
+            M, G[:, active_set], Z_init,
+            np.ones(len(active) * n_orient, dtype=np.bool),
+            alpha_space, alpha_time,
+            lipschitz_constant[active_set[::n_orient]],
+            phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient,
+            maxit=maxit, tol=tol, log_objective=log_objective,
+            perc=0.5, verbose=verbose)
+        E += E_tmp
+        active = np.where(active_set)[0][::n_orient] // n_orient
+        Z_init = dict.fromkeys(range(n_positions), 0.0)
+        Z_init.update(dict(zip(active, Z.values())))
+        active_set[active_set] = as_
+        active_set_0 = active_set.copy()
+        Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
+            M, G, Z_init, active_set, alpha_space, alpha_time,
+            lipschitz_constant, phi, phiT, wsize=wsize, tstep=tstep,
+            n_orient=n_orient, maxit=1, tol=tol, log_objective=log_objective,
+            perc=None, verbose=verbose)
+        E += E_tmp
+        if converged:
+            if np.array_equal(active_set_0, active_set):
+                break
+
+    if active_set.sum():
+        Z = np.vstack([Z_ for Z_ in list(Z.values()) if np.any(Z_)])
+        X = phiT(Z)
+    else:
+        n_sensors, n_times = M.shape
+        n_step = int(ceil(n_times / float(tstep)))
+        n_freq = wsize // 2 + 1
+        Z = np.zeros((0, n_step * n_freq), dtype=np.complex)
+        X = np.zeros((0, n_times))
+
+    return X, Z, active_set, E
+
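
The while loop above alternates a solve restricted to the active columns
(stopped early via perc=0.5) with one full-dictionary pass that lets new
sources enter. A restricted solve returns its mask as_ relative to the active
columns only, and the numpy idiom active_set[active_set] = as_ maps it back
to the full support; a tiny runnable check:

    import numpy as np

    active_set = np.array([False, True, True, False, True])
    as_ = np.array([True, False, True])  # survivors among the 3 active columns
    active_set[active_set] = as_         # compose the two masks
    print(active_set)                    # [False  True False False  True]
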
+
 @verbose
 def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
                          n_orient=1, maxit=200, tol=1e-8, log_objective=True,
-                         lipschitz_constant=None, debias=True, verbose=None):
-    """Solves TF L21+L1 inverse solver
+                         debias=True, verbose=None):
+    """Solves TF L21+L1 inverse solver with BCD and active set approach
 
     Algorithm is detailed in:
 
@@ -492,10 +976,10 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
 
     Parameters
     ----------
-    M : array
+    M : array, shape (n_sensors, n_times)
         The data.
-    G : array
-        The forward operator.
+    G : array, shape (n_sensors, n_dipoles)
+        The gain matrix a.k.a. lead field.
     alpha_space : float
         The spatial regularization parameter. It should be between 0 and 100.
     alpha_time : float
@@ -516,9 +1000,6 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
     log_objective : bool
         If True, the value of the minimized objective function is computed
         and stored at every iteration.
-    lipschitz_constant : float | None
-        The lipschitz constant of the spatio temporal linear operator.
-        If None it is estimated.
     debias : bool
         Debias source estimates.
     verbose : bool, str, int, or None
@@ -526,7 +1007,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
 
     Returns
     -------
-    X : array
+    X : array, shape (n_active, n_times)
         The source estimates.
     active_set : array
         The mask of active sources.
@@ -535,7 +1016,8 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
         is False, it will be empty.
     """
     n_sensors, n_times = M.shape
-    n_dipoles = G.shape[1]
+    n_sensors, n_sources = G.shape
+    n_positions = n_sources // n_orient
 
     n_step = int(ceil(n_times / float(tstep)))
     n_freq = wsize // 2 + 1
@@ -543,89 +1025,21 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
     phi = _Phi(wsize, tstep, n_coefs)
     phiT = _PhiT(tstep, n_freq, n_step, n_times)
 
-    Z = np.zeros((0, n_coefs), dtype=np.complex)
-    active_set = np.zeros(n_dipoles, dtype=np.bool)
-    R = M.copy()  # residual
-
-    if lipschitz_constant is None:
-        lipschitz_constant = 1.1 * tf_lipschitz_constant(M, G, phi, phiT)
-
-    logger.info("lipschitz_constant : %s" % lipschitz_constant)
-
-    t = 1.0
-    Y = np.zeros((n_dipoles, n_coefs), dtype=np.complex)  # FISTA aux variable
-    Y[active_set] = Z
-    E = []  # track cost function
-    Y_time_as = None
-    Y_as = None
-
-    alpha_time_lc = alpha_time / lipschitz_constant
-    alpha_space_lc = alpha_space / lipschitz_constant
-    for i in range(maxit):
-        Z0, active_set_0 = Z, active_set  # store previous values
-
-        if active_set.sum() < len(R) and Y_time_as is not None:
-            # trick when using tight frame to do a first screen based on
-            # L21 prox (L21 norms are not changed by phi)
-            GTR = np.dot(G.T, R) / lipschitz_constant
-            A = GTR.copy()
-            A[Y_as] += Y_time_as
-            _, active_set_l21 = prox_l21(A, alpha_space_lc, n_orient)
-            # just compute prox_l1 on rows that won't be zeroed by prox_l21
-            B = Y[active_set_l21] + phi(GTR[active_set_l21])
-            Z, active_set_l1 = prox_l1(B, alpha_time_lc, n_orient)
-            active_set_l21[active_set_l21] = active_set_l1
-            active_set_l1 = active_set_l21
-        else:
-            Y += np.dot(G.T, phi(R)) / lipschitz_constant  # ISTA step
-            Z, active_set_l1 = prox_l1(Y, alpha_time_lc, n_orient)
-
-        Z, active_set_l21 = prox_l21(Z, alpha_space_lc, n_orient,
-                                     shape=(-1, n_freq, n_step), is_stft=True)
-        active_set = active_set_l1
-        active_set[active_set_l1] = active_set_l21
-
-        # Check convergence : max(abs(Z - Z0)) < tol
-        stop = (safe_max_abs(Z, ~active_set_0[active_set]) < tol and
-                safe_max_abs(Z0, ~active_set[active_set_0]) < tol and
-                safe_max_abs_diff(Z, active_set_0[active_set],
-                                  Z0, active_set[active_set_0]) < tol)
-        if stop:
-            print('Convergence reached !')
-            break
-
-        # FISTA 2 steps
-        # compute efficiently : Y = Z + ((t0 - 1.0) / t) * (Z - Z0)
-        t0 = t
-        t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
-        Y.fill(0.0)
-        dt = ((t0 - 1.0) / t)
-        Y[active_set] = (1.0 + dt) * Z
-        if len(Z0):
-            Y[active_set_0] -= dt * Z0
-        Y_as = active_set_0 | active_set
-
-        Y_time_as = phiT(Y[Y_as])
-        R = M - np.dot(G[:, Y_as], Y_time_as)
-
-        if log_objective:  # log cost function value
-            Z2 = np.abs(Z)
-            Z2 **= 2
-            X = phiT(Z)
-            RZ = M - np.dot(G[:, active_set], X)
-            pobj = 0.5 * linalg.norm(RZ, ord='fro') ** 2 \
-               + alpha_space * norm_l21(X, n_orient) \
-               + alpha_time * np.sqrt(np.sum(Z2.T.reshape(-1, n_orient),
-                                             axis=1)).sum()
-            E.append(pobj)
-            logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
-                        pobj, np.sum(active_set)))
-        else:
-            logger.info("Iteration %d" % i + 1)
-
-    X = phiT(Z)
-
-    if (active_set.sum() > 0) and debias:
+    if n_orient == 1:
+        lc = np.sum(G * G, axis=0)
+    else:
+        lc = np.empty(n_positions)
+        for j in range(n_positions):
+            G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
+            lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
+
+    logger.info("Using block coordinate descent and active set approach")
+    X, Z, active_set, E = _tf_mixed_norm_solver_bcd_active_set(
+        M, G, alpha_space, alpha_time, lc, phi, phiT, Z_init=None,
+        wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=maxit, tol=tol,
+        log_objective=log_objective, verbose=verbose)
+
+    if np.any(active_set) and debias:
         bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
         X *= bias[:, np.newaxis]
 
diff --git a/mne/inverse_sparse/tests/test_gamma_map.py b/mne/inverse_sparse/tests/test_gamma_map.py
index b350d1e..2a36d87 100644
--- a/mne/inverse_sparse/tests/test_gamma_map.py
+++ b/mne/inverse_sparse/tests/test_gamma_map.py
@@ -7,24 +7,32 @@ import numpy as np
 from nose.tools import assert_true
 from numpy.testing import assert_array_almost_equal
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import read_cov, read_forward_solution, read_evokeds
 from mne.cov import regularize
 from mne.inverse_sparse import gamma_map
+from mne import pick_types_forward
+from mne.utils import run_tests_if_main, slow_test
 
-data_path = sample.data_path(download=False)
-fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+data_path = testing.data_path(download=False)
+fname_evoked = op.join(data_path, 'MEG', 'sample',
+                       'sample_audvis-ave.fif')
 fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-eeg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+subjects_dir = op.join(data_path, 'subjects')
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_gamma_map():
     """Test Gamma MAP inverse"""
+
     forward = read_forward_solution(fname_fwd, force_fixed=False,
                                     surf_ori=True)
+    forward = pick_types_forward(forward, meg=False, eeg=True)
     evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
+    evoked.resample(50)
     evoked.crop(tmin=0, tmax=0.3)
 
     cov = read_cov(fname_cov)
@@ -32,20 +40,25 @@ def test_gamma_map():
 
     alpha = 0.2
     stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
-                    xyz_same_gamma=True, update_mode=1)
+                    xyz_same_gamma=True, update_mode=1, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
     idx = np.argmax(np.sum(stc.data ** 2, axis=1))
-    assert_true(np.concatenate(stc.vertno)[idx] == 96397)
+    assert_true(np.concatenate(stc.vertices)[idx] == 96397)
 
     stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
-                    xyz_same_gamma=False, update_mode=1)
+                    xyz_same_gamma=False, update_mode=1, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
     idx = np.argmax(np.sum(stc.data ** 2, axis=1))
-    assert_true(np.concatenate(stc.vertno)[idx] == 82010)
+    assert_true(np.concatenate(stc.vertices)[idx] == 82010)
 
     # force fixed orientation
     stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                          xyz_same_gamma=False, update_mode=2,
-                         loose=None, return_residual=True)
+                         loose=None, return_residual=True, verbose=False)
+    assert_array_almost_equal(stc.times, evoked.times, 5)
     idx = np.argmax(np.sum(stc.data ** 2, axis=1))
-    assert_true(np.concatenate(stc.vertno)[idx] == 83398)
-
+    # assert_true(np.concatenate(stc.vertices)[idx] == 83398)  # XXX FIX
     assert_array_almost_equal(evoked.times, res.times)
+
+
+run_tests_if_main()
diff --git a/mne/inverse_sparse/tests/test_mxne_inverse.py b/mne/inverse_sparse/tests/test_mxne_inverse.py
index 2a5ac19..9b0c134 100644
--- a/mne/inverse_sparse/tests/test_mxne_inverse.py
+++ b/mne/inverse_sparse/tests/test_mxne_inverse.py
@@ -4,33 +4,32 @@
 # License: Simplified BSD
 
 import os.path as op
-import copy
 import numpy as np
-from numpy.testing import assert_array_almost_equal
-from nose.tools import assert_true
+from numpy.testing import assert_array_almost_equal, assert_allclose
+from nose.tools import assert_true, assert_equal
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.label import read_label
 from mne import read_cov, read_forward_solution, read_evokeds
 from mne.inverse_sparse import mixed_norm, tf_mixed_norm
 from mne.minimum_norm import apply_inverse, make_inverse_operator
+from mne.utils import run_tests_if_main, slow_test
 
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
+# NOTE: These use the ave and cov from sample dataset (no _trunc)
 fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
 fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
 label = 'Aud-rh'
 fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_mxne_inverse():
     """Test (TF-)MxNE inverse computation"""
-    # Handling forward solution
-    evoked = read_evokeds(fname_data, condition=1, baseline=(None, 0))
-
     # Read noise covariance matrix
     cov = read_cov(fname_cov)
 
@@ -39,46 +38,66 @@ def test_mxne_inverse():
     depth = 0.9
 
     evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
-    evoked.crop(tmin=-0.1, tmax=0.4)
+    evoked.crop(tmin=-0.05, tmax=0.2)
 
-    evoked_l21 = copy.deepcopy(evoked)
+    evoked_l21 = evoked.copy()
     evoked_l21.crop(tmin=0.08, tmax=0.1)
     label = read_label(fname_label)
-    weights_min = 0.5
+
     forward = read_forward_solution(fname_fwd, force_fixed=False,
                                     surf_ori=True)
 
     # Reduce source space to make test computation faster
-    inverse_operator = make_inverse_operator(evoked.info, forward, cov,
+    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
                                              loose=loose, depth=depth,
                                              fixed=True)
     stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                              method='dSPM')
     stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
     stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
+    weights_min = 0.5
 
     # MxNE tests
-    alpha = 60  # spatial regularization parameter
-
-    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
-                          depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
-                          solver='prox')
-    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
-                        depth=0.9, maxit=1000, tol=1e-8, active_set_size=10,
+    alpha = 70  # spatial regularization parameter
+
+    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                          depth=depth, maxit=500, tol=1e-8,
+                          active_set_size=10, weights=stc_dspm,
+                          weights_min=weights_min, solver='prox')
+    stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                        depth=depth, maxit=500, tol=1e-8, active_set_size=10,
+                        weights=stc_dspm, weights_min=weights_min,
                         solver='cd')
+    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                         depth=depth, maxit=500, tol=1e-8, active_set_size=10,
+                         weights=stc_dspm, weights_min=weights_min,
+                         solver='bcd')
     assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
     assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
-    assert_array_almost_equal(stc_prox.data, stc_cd.data, 5)
-    assert_true(stc_prox.vertno[1][0] in label.vertices)
-    assert_true(stc_cd.vertno[1][0] in label.vertices)
 
-    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=None,
-                        depth=depth, maxit=500, tol=1e-4, active_set_size=10,
-                        weights=stc_dspm, weights_min=weights_min,
-                        return_residual=True)
+    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
+    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
+    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
+    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
+    assert_true(stc_prox.vertices[1][0] in label.vertices)
+    assert_true(stc_cd.vertices[1][0] in label.vertices)
+    assert_true(stc_bcd.vertices[1][0] in label.vertices)
+
+    stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
+                        depth=depth, maxit=500, tol=1e-8,
+                        active_set_size=10, return_residual=True,
+                        solver='cd')
+    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
+    assert_true(stc.vertices[1][0] in label.vertices)
 
+    # irMxNE tests
+    stc = mixed_norm(evoked_l21, forward, cov, alpha,
+                     n_mxne_iter=5, loose=loose, depth=depth,
+                     maxit=500, tol=1e-8, active_set_size=10,
+                     solver='cd')
     assert_array_almost_equal(stc.times, evoked_l21.times, 5)
-    assert_true(stc.vertno[1][0] in label.vertices)
+    assert_true(stc.vertices[1][0] in label.vertices)
+    assert_equal(stc.vertices, [[63152], [79017]])
 
     # Do with TF-MxNE for test memory savings
     alpha_space = 60.  # spatial regularization parameter
@@ -88,6 +107,8 @@ def test_mxne_inverse():
                            loose=loose, depth=depth, maxit=100, tol=1e-4,
                            tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                            weights_min=weights_min, return_residual=True)
-
     assert_array_almost_equal(stc.times, evoked.times, 5)
-    assert_true(stc.vertno[1][0] in label.vertices)
+    assert_true(stc.vertices[1][0] in label.vertices)
+
+
+run_tests_if_main()
diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py
index b810529..ba49be7 100644
--- a/mne/inverse_sparse/tests/test_mxne_optim.py
+++ b/mne/inverse_sparse/tests/test_mxne_optim.py
@@ -1,13 +1,16 @@
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Daniel Strohmeier <daniel.strohmeier at gmail.com>
 #
 # License: Simplified BSD
 
 import numpy as np
 import warnings
 from numpy.testing import assert_array_equal, assert_array_almost_equal
+from numpy.testing import assert_allclose
 
 from mne.inverse_sparse.mxne_optim import (mixed_norm_solver,
-                                           tf_mixed_norm_solver)
+                                           tf_mixed_norm_solver,
+                                           iterative_mixed_norm_solver)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -31,7 +34,7 @@ def _generate_tf_data():
 
 def test_l21_mxne():
     """Test convergence of MxNE solver"""
-    n, p, t, alpha = 30, 40, 20, 1
+    n, p, t, alpha = 30, 40, 20, 1.
     rng = np.random.RandomState(0)
     G = rng.randn(n, p)
     G /= np.std(G, axis=0)[None, :]
@@ -40,86 +43,154 @@ def test_l21_mxne():
     X[4] = -2
     M = np.dot(G, X)
 
-    X_hat_prox, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=None, debias=True,
-                            solver='prox')
+    args = (M, G, alpha, 1000, 1e-8)
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=None,
+        debias=True, solver='prox')
     assert_array_equal(np.where(active_set)[0], [0, 4])
-    X_hat_cd, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=None, debias=True,
-                            solver='cd')
+    X_hat_cd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=None,
+        debias=True, solver='cd')
     assert_array_equal(np.where(active_set)[0], [0, 4])
-    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        M, G, alpha, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
+    assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
 
-    X_hat_prox, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            solver='prox')
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='prox')
     assert_array_equal(np.where(active_set)[0], [0, 4])
-    X_hat_cd, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            solver='cd')
+    X_hat_cd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='cd')
     assert_array_equal(np.where(active_set)[0], [0, 4])
-    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
 
-    X_hat_prox, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            n_orient=2, solver='prox')
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=2, solver='prox')
     assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=2, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+
     # suppress a coordinate-descent warning here
     with warnings.catch_warnings(record=True):
-        X_hat_cd, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            n_orient=2, solver='cd')
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=2, solver='cd')
     assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
-    assert_array_equal(X_hat_prox, X_hat_cd)
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
+    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
 
-    X_hat_prox, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            n_orient=5)
+    X_hat_bcd, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=5, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    X_hat_prox, active_set, _ = mixed_norm_solver(
+        *args, active_set_size=2, debias=True, n_orient=5, solver='prox')
     assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
     with warnings.catch_warnings(record=True):  # coordinate-ascent warning
-        X_hat_cd, active_set, _ = mixed_norm_solver(M,
-                            G, alpha, maxit=1000, tol=1e-8,
-                            active_set_size=2, debias=True,
-                            n_orient=5, solver='cd')
+        X_hat_cd, active_set, _ = mixed_norm_solver(
+            *args, active_set_size=2, debias=True, n_orient=5, solver='cd')
+
     assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    assert_array_equal(X_hat_bcd, X_hat_cd)
+    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
 
 
 def test_tf_mxne():
     """Test convergence of TF-MxNE solver"""
-    alpha_space = 10
-    alpha_time = 5
+    alpha_space = 10.
+    alpha_time = 5.
 
     M, G, active_set = _generate_tf_data()
 
-    X_hat, active_set_hat, E = tf_mixed_norm_solver(M, G,
-                                alpha_space, alpha_time, maxit=200,
-                                tol=1e-8, verbose=True,
-                                n_orient=1, tstep=4, wsize=32)
-
-    assert_array_equal(np.where(active_set_hat)[0], active_set)
+    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
+        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
+        n_orient=1, tstep=4, wsize=32)
+    assert_array_equal(np.where(active_set_hat_tf)[0], active_set)
 
 
 def test_tf_mxne_vs_mxne():
     """Test equivalence of TF-MxNE (with alpha_time=0) and MxNE"""
-    alpha_space = 60
-    alpha_time = 0
+    alpha_space = 60.
+    alpha_time = 0.
 
     M, G, active_set = _generate_tf_data()
 
-    X_hat, active_set_hat, E = tf_mixed_norm_solver(M, G,
-                                alpha_space, alpha_time, maxit=200,
-                                tol=1e-8, verbose=True, debias=False,
-                                n_orient=1, tstep=4, wsize=32)
+    X_hat_tf, active_set_hat_tf, E = tf_mixed_norm_solver(
+        M, G, alpha_space, alpha_time, maxit=200, tol=1e-8, verbose=True,
+        debias=False, n_orient=1, tstep=4, wsize=32)
 
     # Also run L21 and check that we get the same
-    X_hat_l21, _, _ = mixed_norm_solver(M, G, alpha_space, maxit=200,
-                            tol=1e-8, verbose=False, n_orient=1,
-                            active_set_size=None, debias=False)
-    assert_array_almost_equal(X_hat, X_hat_l21, decimal=2)
+    X_hat_l21, _, _ = mixed_norm_solver(
+        M, G, alpha_space, maxit=200, tol=1e-8, verbose=False, n_orient=1,
+        active_set_size=None, debias=False)
+
+    assert_allclose(X_hat_tf, X_hat_l21, rtol=1e-1)
+
+
+def test_iterative_reweighted_mxne():
+    """Test convergence of irMxNE solver"""
+    n, p, t, alpha = 30, 40, 20, 1
+    rng = np.random.RandomState(0)
+    G = rng.randn(n, p)
+    G /= np.std(G, axis=0)[None, :]
+    X = np.zeros((p, t))
+    X[0] = 3
+    X[4] = -2
+    M = np.dot(G, X)
+
+    X_hat_l21, _, _ = mixed_norm_solver(
+        M, G, alpha, maxit=1000, tol=1e-8, verbose=False, n_orient=1,
+        active_set_size=None, debias=False, solver='bcd')
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=False, solver='bcd')
+    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 1, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=False, solver='prox')
+    assert_allclose(X_hat_bcd, X_hat_l21, rtol=1e-3)
+    assert_allclose(X_hat_prox, X_hat_l21, rtol=1e-3)
+
+    X_hat_prox, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='prox')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+        debias=True, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=None,
+        debias=True, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 4])
+    assert_array_almost_equal(X_hat_prox, X_hat_cd, 5)
+    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
+
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+        debias=True, n_orient=2, solver='bcd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    # suppress a coordinate-descent warning here
+    with warnings.catch_warnings(record=True):
+        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+            debias=True, n_orient=2, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
+    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
+
+    X_hat_bcd, active_set, _ = iterative_mixed_norm_solver(
+        M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2, debias=True,
+        n_orient=5)
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    with warnings.catch_warnings(record=True):  # coordinate-ascent warning
+        X_hat_cd, active_set, _ = iterative_mixed_norm_solver(
+            M, G, alpha, 5, maxit=1000, tol=1e-8, active_set_size=2,
+            debias=True, n_orient=5, solver='cd')
+    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
+    assert_array_almost_equal(X_hat_bcd, X_hat_cd, 5)
diff --git a/mne/io/__init__.py b/mne/io/__init__.py
index 0d74a62..38b60f3 100644
--- a/mne/io/__init__.py
+++ b/mne/io/__init__.py
@@ -6,9 +6,13 @@
 # License: BSD (3-clause)
 
 from .open import fiff_open, show_fiff, _fiff_get_fid
-from .meas_info import read_fiducials, write_fiducials, read_info, write_info
+from .meas_info import (read_fiducials, write_fiducials, read_info, write_info,
+                        _empty_info)
+
+from .proj import make_eeg_average_ref_proj
+from .tag import _loc_to_coil_trans, _coil_trans_to_loc, _loc_to_eeg_loc
+from .base import _BaseRaw
 
-from .proj import proj_equal, make_eeg_average_ref_proj
 from . import array
 from . import base
 from . import brainvision
@@ -25,9 +29,57 @@ from .brainvision import read_raw_brainvision
 from .bti import read_raw_bti
 from .edf import read_raw_edf
 from .egi import read_raw_egi
-from .kit import read_raw_kit
+from .kit import read_raw_kit, read_epochs_kit
+from .fiff import read_raw_fif
 
 # for backward compatibility
-from .fiff import RawFIFF
-from .fiff import RawFIFF as Raw
-from .base import concatenate_raws, get_chpi_positions, set_eeg_reference
+from .fiff import RawFIF
+from .fiff import RawFIF as Raw
+from .base import concatenate_raws
+from .reference import (set_eeg_reference, set_bipolar_reference,
+                        add_reference_channels)
+from ..utils import deprecated
+
+
+@deprecated('mne.io.get_chpi_positions is deprecated and will be removed in '
+            'v0.11, please use mne.get_chpi_positions')
+def get_chpi_positions(raw, t_step=None, verbose=None):
+    """Extract head positions
+
+    Note that the raw instance must have CHPI channels recorded.
+
+    Parameters
+    ----------
+    raw : instance of Raw | str
+        Raw instance to extract the head positions from. Can also be a
+        path to a Maxfilter log file (str).
+    t_step : float | None
+        Sampling interval to use when converting data. If None, it will
+        be automatically determined. By default, a sampling interval of
+        1 second is used if processing raw data. If processing a
+        Maxfilter log file, this must be None because the log file
+        itself will determine the sampling interval.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    Notes
+    -----
+    The digitized HPI head frame Y is related to the frame position X as:
+
+        Y = np.dot(rotation, X) + translation
+
+    Note that if a Maxfilter log file is being processed, the start time
+    may not use the same reference point as the rest of mne-python (i.e.,
+    it could be referenced relative to raw.first_samp or something else).
+    """
+    from ..chpi import get_chpi_positions
+    return get_chpi_positions(raw, t_step, verbose)
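
The wrapper above keeps mne.io.get_chpi_positions importable for one release
cycle while forwarding to the new location in mne.chpi. The same shim pattern
in miniature; mne.utils.deprecated itself is class-based, so this stand-in is
only illustrative:

    import functools
    import warnings

    def deprecated(msg):
        # minimal stand-in for a deprecation decorator: warn, then delegate
        def wrap(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                warnings.warn(msg, DeprecationWarning)
                return func(*args, **kwargs)
            return inner
        return wrap

    def new_location(x):
        return x + 1

    @deprecated('old_location is deprecated, use new_location')
    def old_location(x):
        return new_location(x)

    print(old_location(1))  # 2, after emitting a DeprecationWarning
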
diff --git a/mne/io/array/array.py b/mne/io/array/array.py
index 3affb91..8231c61 100644
--- a/mne/io/array/array.py
+++ b/mne/io/array/array.py
@@ -1,4 +1,3 @@
-
 """Tools for creating Raw objects from numpy arrays"""
 
 # Authors: Eric Larson <larson.eric.d at gmail.com>
@@ -7,11 +6,8 @@
 
 import numpy as np
 
-from ..constants import FIFF
-from ..meas_info import Info
 from ..base import _BaseRaw
 from ...utils import verbose, logger
-from ...externals.six import string_types
 
 
 class RawArray(_BaseRaw):
@@ -22,8 +18,14 @@ class RawArray(_BaseRaw):
     data : array, shape (n_channels, n_times)
         The channels' time series.
     info : instance of Info
-        Info dictionary. Consider using ``create_info`` to populate
+        Info dictionary. Consider using `create_info` to populate
         this structure.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    EpochsArray, EvokedArray, create_info
     """
     @verbose
     def __init__(self, data, info, verbose=None):
@@ -40,24 +42,7 @@ class RawArray(_BaseRaw):
         if len(data) != len(info['ch_names']):
             raise ValueError('len(data) does not match len(info["ch_names"])')
         assert len(info['ch_names']) == info['nchan']
-
-        cals = np.zeros(info['nchan'])
-        for k in range(info['nchan']):
-            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
-
-        self.verbose = verbose
-        self.cals = cals
-        self.rawdir = None
-        self.proj = None
-        self.comp = None
-        self._filenames = list()
-        self.preload = True
-        self.info = info
-        self._data = data
-        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
-        self._times = np.arange(self.first_samp,
-                                self.last_samp + 1) / info['sfreq']
-        self._projectors = list()
+        super(RawArray, self).__init__(info, data, verbose=verbose)
         logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
                     self.first_samp, self.last_samp,
                     float(self.first_samp) / info['sfreq'],
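
With the constructor now delegating to _BaseRaw.__init__, building a RawArray
needs only a data array plus an Info. A minimal sketch using the import path
from the tests below; the channel names are illustrative:

    import numpy as np
    from mne import create_info
    from mne.io.array import RawArray

    sfreq = 100.  # Hz
    data = np.random.RandomState(0).randn(2, 500)  # 2 channels, 5 s
    info = create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=sfreq,
                       ch_types=['eeg', 'eeg'])
    raw = RawArray(data, info)
    print(raw.first_samp, raw.last_samp)           # 0 499, set by _BaseRaw
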
diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py
index 2dc03fc..3e58b1b 100644
--- a/mne/io/array/tests/test_array.py
+++ b/mne/io/array/tests/test_array.py
@@ -6,34 +6,38 @@ from __future__ import print_function
 
 import os.path as op
 import warnings
+import matplotlib
 
-from numpy.testing import (assert_array_almost_equal, assert_allclose,
-                           assert_array_equal)
+from numpy.testing import assert_array_almost_equal, assert_allclose
 from nose.tools import assert_equal, assert_raises, assert_true
-from mne import find_events, Epochs, pick_types
+from mne import find_events, Epochs, pick_types, concatenate_raws
 from mne.io import Raw
 from mne.io.array import RawArray
 from mne.io.meas_info import create_info, _kind_dict
-from mne.utils import _TempDir
+from mne.utils import _TempDir, slow_test, requires_version
+
+matplotlib.use('Agg')  # for testing don't use X server
 
 warnings.simplefilter('always')  # enable b/c these tests might throw warnings
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
 fif_fname = op.join(base_dir, 'test_raw.fif')
 
-tempdir = _TempDir()
-
 
+@slow_test
+@requires_version('scipy', '0.12')
 def test_array_raw():
     """Test creating raw from array
     """
+    import matplotlib.pyplot as plt
+    tempdir = _TempDir()
     # creating
     raw = Raw(fif_fname).crop(2, 5, copy=False)
     data, times = raw[:, :]
     sfreq = raw.info['sfreq']
     ch_names = [(ch[4:] if 'STI' not in ch else ch)
                 for ch in raw.info['ch_names']]  # change them, why not
-    #del raw
+    # del raw
     types = list()
     for ci in range(102):
         types.extend(('grad', 'grad', 'mag'))
@@ -54,6 +58,10 @@ def test_array_raw():
     data2, times2 = raw2[:, :]
     assert_allclose(data, data2)
     assert_allclose(times, times2)
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw2.copy(), raw2])
+    assert_equal(raw_concat.n_times, 2 * raw2.n_times)
+    assert_true('RawArray' in repr(raw2))
 
     # saving
     temp_fname = op.join(tempdir, 'raw.fif')
@@ -88,17 +96,19 @@ def test_array_raw():
     assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
 
     # plotting
-    import matplotlib
-    matplotlib.use('Agg')  # for testing don't use X server
     raw2.plot()
-    raw2.plot_psds()
+    raw2.plot_psd()
+    plt.close('all')
 
     # epoching
     events = find_events(raw2, stim_channel='STI 014')
     events[:, 2] = 1
     assert_true(len(events) > 2)
     epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
-    epochs.plot_drop_log(return_fig=True)
-    epochs.plot()
+    epochs.plot_drop_log()
+    with warnings.catch_warnings(record=True):  # deprecation
+        warnings.simplefilter('always')
+        epochs.plot()
     evoked = epochs.average()
     evoked.plot()
+    plt.close('all')
diff --git a/mne/io/base.py b/mne/io/base.py
index a25f4cc..ab5e16e 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -2,10 +2,11 @@
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
 #
 # License: BSD (3-clause)
 
-from math import floor, ceil
 import copy
 from copy import deepcopy
 import warnings
@@ -13,41 +14,490 @@ import os
 import os.path as op
 
 import numpy as np
-from scipy.signal import hilbert
 from scipy import linalg
 
 from .constants import FIFF
-from .pick import pick_types, channel_type, pick_channels
+from .pick import pick_types, channel_type, pick_channels, pick_info
 from .meas_info import write_meas_info
-from .proj import (setup_proj, activate_proj, proj_equal, ProjMixin,
-                   _has_eeg_average_ref_proj, make_eeg_average_ref_proj)
-from ..channels import ContainsMixin, PickDropChannelsMixin
+from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
+from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
+                                 SetChannelsMixin, InterpolationMixin)
+from ..channels.montage import read_montage, _set_montage, Montage
 from .compensator import set_current_comp
 from .write import (start_file, end_file, start_block, end_block,
                     write_dau_pack16, write_float, write_double,
                     write_complex64, write_complex128, write_int,
-                    write_id, write_string)
+                    write_id, write_string, _get_split_size)
 
 from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
-                      notch_filter, band_stop_filter, resample)
+                      notch_filter, band_stop_filter, resample,
+                      _resample_stim_channels)
+from ..fixes import in1d
 from ..parallel import parallel_func
-from ..utils import (_check_fname, estimate_rank, _check_pandas_installed,
+from ..utils import (_check_fname, _check_pandas_installed,
+                     _check_pandas_index_arguments,
                      check_fname, _get_stim_channel, object_hash,
-                     logger, verbose)
-from ..viz import plot_raw, plot_raw_psds, _mutable_defaults
+                     logger, verbose, _time_mask, deprecated)
+from ..viz import plot_raw, plot_raw_psd
+from ..defaults import _handle_default
 from ..externals.six import string_types
-from ..event import concatenate_events
+from ..event import find_events, concatenate_events
 
 
-class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
-    """Base class for Raw data"""
+class ToDataFrameMixin(object):
+    """Class to add to_data_frame capabilities to certain classes."""
+    def _get_check_picks(self, picks, picks_check):
+        if picks is None:
+            picks = list(range(self.info['nchan']))
+        else:
+            if not in1d(picks, np.arange(len(picks_check))).all():
+                raise ValueError('At least one picked channel is not present '
+                                 'in this object instance.')
+        return picks
+
+    def to_data_frame(self, picks=None, index=None, scale_time=1e3,
+                      scalings=None, copy=True, start=None, stop=None):
+        """Export data in tabular structure as a pandas DataFrame.
+
+        Columns and indices will depend on the object being converted.
+        Generally this will include as much relevant information as
+        possible for the data type being converted. This makes it easy
+        to convert data for use in packages that utilize dataframes,
+        such as statsmodels or seaborn.
+
+        Parameters
+        ----------
+        picks : array-like of int | None
+            If None, all channels are kept; otherwise only the
+            channel indices in picks are kept.
+        index : tuple of str | None
+            Column to be used as index for the data. Valid string options
+            are 'epoch', 'time' and 'condition'. If None, all three info
+            columns will be included in the table as categorical data.
+        scale_time : float
+            Scaling to be applied to time units.
+        scalings : dict | None
+            Scaling to be applied to the channels picked. If None, defaults to
+            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
+        copy : bool
+            If True, data will be copied. Otherwise data may be modified in
+            place.
+        start : int | None
+            If it is a Raw object, this defines a starting index for creating
+            the dataframe from a slice. The times will be interpolated from the
+            index and the sampling rate of the signal.
+        stop : int | None
+            If it is a Raw object, this defines a stop index for creating
+            the dataframe from a slice. The times will be interpolated from the
+            index and the sampling rate of the signal.
+
+        Returns
+        -------
+        df : instance of pandas.core.DataFrame
+            A dataframe suitable for usage with other
+            statistical/plotting/analysis packages. Column/Index values will
+            depend on the object type being converted, but should be
+            human-readable.
+        """
+        from ..epochs import _BaseEpochs
+        from ..evoked import Evoked
+        from ..source_estimate import _BaseSourceEstimate
+
+        pd = _check_pandas_installed()
+        mindex = list()
+        # Treat SourceEstimates special because they don't have the same info
+        if isinstance(self, _BaseSourceEstimate):
+            if self.subject is None:
+                default_index = ['time']
+            else:
+                default_index = ['subject', 'time']
+            data = self.data.T
+            times = self.times
+            shape = data.shape
+            mindex.append(('subject', np.repeat(self.subject, shape[0])))
+
+            if isinstance(self.vertices, list):
+                # surface source estimates
+                col_names = [i for e in [
+                    ['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)
+                     for vert in vertno]
+                    for ii, vertno in enumerate(self.vertices)]
+                    for i in e]
+            else:
+                # volume source estimates
+                col_names = ['VOL {0}'.format(vert) for vert in self.vertices]
+        elif isinstance(self, (_BaseEpochs, _BaseRaw, Evoked)):
+            picks = self._get_check_picks(picks, self.ch_names)
+            if isinstance(self, _BaseEpochs):
+                default_index = ['condition', 'epoch', 'time']
+                data = self.get_data()[:, picks, :]
+                times = self.times
+                n_epochs, n_picks, n_times = data.shape
+                data = np.hstack(data).T  # (time*epochs) x signals
+
+                # Multi-index creation
+                times = np.tile(times, n_epochs)
+                id_swapped = dict((v, k) for k, v in self.event_id.items())
+                names = [id_swapped[k] for k in self.events[:, 2]]
+                mindex.append(('condition', np.repeat(names, n_times)))
+                mindex.append(('epoch',
+                              np.repeat(np.arange(n_epochs), n_times)))
+                col_names = [self.ch_names[k] for k in picks]
+
+            elif isinstance(self, (_BaseRaw, Evoked)):
+                default_index = ['time']
+                if isinstance(self, _BaseRaw):
+                    data, times = self[picks, start:stop]
+                elif isinstance(self, Evoked):
+                    data = self.data[picks, :]
+                    times = self.times
+                    n_picks, n_times = data.shape
+                data = data.T
+                col_names = [self.ch_names[k] for k in picks]
+
+            types = [channel_type(self.info, idx) for idx in picks]
+            n_channel_types = 0
+            ch_types_used = []
+
+            scalings = _handle_default('scalings', scalings)
+            for t in scalings.keys():
+                if t in types:
+                    n_channel_types += 1
+                    ch_types_used.append(t)
+
+            for t in ch_types_used:
+                scaling = scalings[t]
+                idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+                if len(idx) > 0:
+                    data[:, idx] *= scaling
+        else:
+            # In case some other object gets this mixin w/o an explicit check
+            raise TypeError('Object must be one of Raw, Epochs, Evoked, or ' +
+                            'SourceEstimate. This is {0}'.format(type(self)))
+
+        # Make sure that the time index is scaled correctly
+        times = np.round(times * scale_time)
+        mindex.append(('time', times))
+
+        if index is not None:
+            _check_pandas_index_arguments(index, default_index)
+        else:
+            index = default_index
+
+        if copy is True:
+            data = data.copy()
+
+        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
+
+        df = pd.DataFrame(data, columns=col_names)
+        for i, (k, v) in enumerate(mindex):
+            df.insert(i, k, v)
+        if index is not None:
+            if 'time' in index:
+                logger.info('Converting time column to int64...')
+                df['time'] = df['time'].astype(np.int64)
+            df.set_index(index, inplace=True)
+        if all(i in default_index for i in index):
+            df.columns.name = 'signal'
+        return df
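
A short usage sketch of the mixin on a RawArray, following the docstring
above; this requires pandas, and the index and scaling choices are
illustrative:

    import numpy as np
    from mne import create_info
    from mne.io.array import RawArray

    info = create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=100.,
                       ch_types=['eeg', 'eeg'])
    raw = RawArray(np.random.RandomState(0).randn(2, 200), info)

    # RawArray gets to_data_frame through _BaseRaw / ToDataFrameMixin
    df = raw.to_data_frame(index=['time'], scale_time=1e3)
    print(df.head())  # one column per channel, time in ms as the index
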
+
+
+def _check_fun(fun, d, *args, **kwargs):
+    want_shape = d.shape
+    d = fun(d, *args, **kwargs)
+    if not isinstance(d, np.ndarray):
+        raise TypeError('Return value must be an ndarray')
+    if d.shape != want_shape:
+        raise ValueError('Return data must have shape %s not %s'
+                         % (want_shape, d.shape))
+    return d
+
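
_check_fun guards user callbacks (e.g. for apply_function-style hooks) by
insisting the callback returns an ndarray of the same shape; in miniature,
assuming it is called from within this module:

    import numpy as np

    d = np.arange(6.).reshape(2, 3)
    out = _check_fun(lambda x: x ** 2, d)        # fine: shape is preserved
    # _check_fun(lambda x: x.ravel(), d)         # ValueError: shape changed
    # _check_fun(lambda x: float(x.sum()), d)    # TypeError: not an ndarray
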
+
+class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
+               SetChannelsMixin, InterpolationMixin, ToDataFrameMixin):
+    """Base class for Raw data
+
+    Subclasses must provide the following methods:
+
+        * _read_segment_file(self, data, idx, offset, fi, start, stop,
+                             cals, mult)
+          (only needed for types that support on-demand disk reads)
+
+    The `_BaseRaw._raw_extras` list can contain whatever data is necessary for
+    such on-demand reads. For `RawFIF` this means a list of variables formerly
+    known as ``_rawdirs``.
+    """
     @verbose
-    def __init__(self, *args, **kwargs):
-        raise NotImplementedError
+    def __init__(self, info, preload=False,
+                 first_samps=(0,), last_samps=None,
+                 filenames=(None,), raw_extras=(None,),
+                 comp=None, orig_comp_grade=None,
+                 orig_format='double', dtype=np.float64,
+                 verbose=None):
+        # wait until the end to preload data, but triage here
+        if isinstance(preload, np.ndarray):
+            # some functions (e.g., filtering) only work w/64-bit data
+            if preload.dtype not in (np.float64, np.complex128):
+                raise RuntimeError('datatype must be float64 or complex128, '
+                                   'not %s' % preload.dtype)
+            if preload.dtype != dtype:
+                raise ValueError('preload and dtype must match')
+            self._data = preload
+            self.preload = True
+            last_samps = [self._data.shape[1] - 1]
+            load_from_disk = False
+        else:
+            if last_samps is None:
+                raise ValueError('last_samps must be given unless preload is '
+                                 'an ndarray')
+            if preload is False:
+                self.preload = False
+                load_from_disk = False
+            elif preload is not True and not isinstance(preload, string_types):
+                raise ValueError('bad preload: %s' % preload)
+            else:
+                load_from_disk = True
+        self._last_samps = np.array(last_samps)
+        self._first_samps = np.array(first_samps)
+        info._check_consistency()  # make sure subclass did a good job
+        self.info = info
+        cals = np.empty(info['nchan'])
+        for k in range(info['nchan']):
+            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
+        self.verbose = verbose
+        self._cals = cals
+        self._raw_extras = list(raw_extras)
+        self.comp = comp
+        self._orig_comp_grade = orig_comp_grade
+        self._filenames = list(filenames)
+        self.orig_format = orig_format
+        self._projectors = list()
+        self._projector = None
+        self._dtype_ = dtype
+        # If we have True or a string, actually do the preloading
+        if load_from_disk:
+            self._preload_data(preload)
+        self._update_times()
+
+    @property
+    def _dtype(self):
+        """dtype for loading data (property so subclasses can override)"""
+        # most classes only store real data, they won't need anything special
+        return self._dtype_
+
+    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+                      projector=None, verbose=None):
+        """Read a chunk of raw data
+
+        Parameters
+        ----------
+        start : int, optional
+            First sample to include (first is 0). If omitted, defaults to the
+            first sample in data.
+        stop : int, optional
+            First sample not to include.
+            If omitted, data is included to the end.
+        sel : array, optional
+            Indices of channels to select.
+        data_buffer : array or str, optional
+            Array to fill with the data read; it must have the correct shape.
+            If str, a np.memmap with the correct data type will be used
+            to store the data.
+        projector : array
+            SSP operator to apply to the data.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        data : array, [channels x samples]
+            The data matrix (channels x samples).
+        times : array, [samples]
+            The time values corresponding to the samples.
+        """
+        #  Initial checks
+        start = int(start)
+        stop = self.n_times if stop is None else min([int(stop), self.n_times])
+
+        if start >= stop:
+            raise ValueError('No data in this range')
+
+        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
+                    (start, stop - 1, start / float(self.info['sfreq']),
+                     (stop - 1) / float(self.info['sfreq'])))
+
+        #  Initialize the data and calibration vector
+        n_sel_channels = self.info['nchan'] if sel is None else len(sel)
+        # convert sel to a slice if possible for efficiency
+        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
+            sel = slice(sel[0], sel[-1] + 1)
+        idx = slice(None, None, None) if sel is None else sel
+        data_shape = (n_sel_channels, stop - start)
+        dtype = self._dtype
+        if isinstance(data_buffer, np.ndarray):
+            if data_buffer.shape != data_shape:
+                raise ValueError('data_buffer has incorrect shape')
+            data = data_buffer
+        elif isinstance(data_buffer, string_types):
+            # use a memmap
+            data = np.memmap(data_buffer, mode='w+',
+                             dtype=dtype, shape=data_shape)
+        else:
+            data = np.zeros(data_shape, dtype=dtype)
+
+        # deal with having multiple files accessed by the raw object
+        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
+                                                   dtype='int')))
+        cumul_lens = np.cumsum(cumul_lens)
+        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
+                                    np.greater_equal(stop - 1,
+                                                     cumul_lens[:-1]))
+
+        # set up cals and mult (cals, compensation, and projector)
+        cals = self._cals.ravel()[np.newaxis, :]
+        if self.comp is None and projector is None:
+            mult = None
+        else:
+            mult = list()
+            for ri in range(len(self._first_samps)):
+                if self.comp is not None:
+                    if projector is not None:
+                        mul = self.comp * cals
+                        mul = np.dot(projector[idx], mul)
+                    else:
+                        mul = self.comp[idx] * cals
+                elif projector is not None:
+                    mul = projector[idx] * cals
+                else:
+                    mul = np.diag(self._cals.ravel())[idx]
+                mult.append(mul)
+        cals = cals.T[idx]
+
+        # read from necessary files
+        offset = 0
+        for fi in np.nonzero(files_used)[0]:
+            start_file = self._first_samps[fi]
+            # first iteration (only) could start in the middle somewhere
+            if offset == 0:
+                start_file += start - cumul_lens[fi]
+            stop_file = np.min([stop - 1 - cumul_lens[fi] +
+                                self._first_samps[fi], self._last_samps[fi]])
+            if start_file < self._first_samps[fi] or \
+                    stop_file > self._last_samps[fi] or \
+                    stop_file < start_file or start_file > stop_file:
+                raise ValueError('Bad array indexing, could be a bug')
+
+            self._read_segment_file(data, idx, offset, fi,
+                                    start_file, stop_file, cals, mult)
+            offset += stop_file - start_file + 1
+
+        logger.info('[done]')
+        times = np.arange(start, stop) / self.info['sfreq']
+        return data, times
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a segment of data from a file
+
+        Only needs to be implemented for readers that support
+        ``preload=False``.
 
-    def _read_segment(start, stop, sel, projector, verbose):
+        Parameters
+        ----------
+        data : ndarray, shape (len(idx), n_samp)
+            The data array. Should be modified inplace.
+        idx : ndarray | slice
+            The requested channel indices.
+        offset : int
+            Offset. Data should be stored in something like::
+
+                data[:, offset:offset + (stop - start + 1)] = r[idx]
+
+        fi : int
+            The file index that must be read from.
+        start : int
+            The start sample in the given file.
+        stop : int
+            The stop sample in the given file (inclusive).
+        cals : ndarray, shape (len(idx), 1)
+            Channel calibrations (already sub-indexed).
+        mult : ndarray, shape (len(idx), len(info['chs'])) | None
+            The compensation + projection + cals matrix, if applicable.
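+
+        A skeletal (hypothetical) implementation for a flat binary file of
+        little-endian float64 samples, stored as one multiplexed frame of
+        ``nchan`` values per time point, might look like::
+
+            def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                                   cals, mult):
+                nchan = self.info['nchan']
+                n_samp = stop - start + 1
+                with open(self._filenames[fi], 'rb') as fid:
+                    fid.seek(start * nchan * 8)  # 8 bytes per float64
+                    block = np.fromfile(fid, '<f8', nchan * n_samp)
+                block = block.reshape(n_samp, nchan).T
+                if mult is not None:  # proj/comp/cals matrix, one per file
+                    block = np.dot(mult[fi], block)
+                else:
+                    block = block[idx] * cals  # calibrate selected channels
+                data[:, offset:offset + n_samp] = block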
+        """
         raise NotImplementedError
 
+    @deprecated("This method has been renamed 'load_data' and will be removed "
+                "in v0.11.")
+    def preload_data(self, verbose=None):
+        """Preload raw data
+
+        Parameters
+        ----------
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object with data.
+
+        Notes
+        -----
+        This function will load raw data if it was not already preloaded.
+        If data were already preloaded, it will do nothing.
+        """
+        return self.load_data(verbose=verbose)
+
+    @verbose
+    def load_data(self, verbose=None):
+        """Load raw data
+
+        Parameters
+        ----------
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object with data.
+
+        Notes
+        -----
+        This function will load raw data if it was not already preloaded.
+        If data were already preloaded, it will do nothing.
+
+        .. versionadded:: 0.10.0
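+
+        A typical pattern (file name illustrative) would be::
+
+            raw = mne.io.Raw('sample_raw.fif', preload=False)
+            raw.load_data()  # reads everything into memory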
+        """
+        if not self.preload:
+            self._preload_data(True)
+        return self
+
+    def _preload_data(self, preload):
+        """This function actually preloads the data"""
+        data_buffer = preload if isinstance(preload, string_types) else None
+        self._data = self._read_segment(data_buffer=data_buffer)[0]
+        assert len(self._data) == self.info['nchan']
+        self.preload = True
+        self.close()
+
+    def _update_times(self):
+        """Helper to update times"""
+        self._times = np.arange(self.n_times) / float(self.info['sfreq'])
+        # make it immutable
+        self._times.flags.writeable = False
+
+    @property
+    def first_samp(self):
+        return self._first_samps[0]
+
+    @property
+    def last_samp(self):
+        return self.first_samp + sum(self._raw_lengths) - 1
+
+    @property
+    def _raw_lengths(self):
+        return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]
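+
+    # Worked example of the bookkeeping above (numbers hypothetical): with
+    # _first_samps=[10, 0] and _last_samps=[19, 4], _raw_lengths is [10, 5],
+    # first_samp is 10, and last_samp is 10 + 15 - 1 = 24.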
+
     def __del__(self):
         # remove file for memmap
         if hasattr(self, '_data') and hasattr(self._data, 'filename'):
@@ -55,7 +505,10 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             filename = self._data.filename
             del self._data
             # Now file can be removed
-            os.remove(filename)
+            try:
+                os.remove(filename)
+            except OSError:
+                pass  # ignore file that no longer exists
 
     def __enter__(self):
         """ Entering with block """
@@ -73,15 +526,6 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             raise RuntimeError('Cannot hash raw unless preloaded')
         return object_hash(dict(info=self.info, data=self._data))
 
-    def _add_eeg_ref(self, add_eeg_ref):
-        """Helper to add an average EEG reference"""
-        if add_eeg_ref:
-            eegs = pick_types(self.info, meg=False, eeg=True, ref_meg=False)
-            projs = self.info['projs']
-            if len(eegs) > 0 and not _has_eeg_average_ref_proj(projs):
-                eeg_ref = make_eeg_average_ref_proj(self.info, activate=False)
-                projs.append(eeg_ref)
-
     def _parse_get_set_params(self, item):
         # make sure item is a tuple
         if not isinstance(item, tuple):  # only channel selection passed
@@ -109,7 +553,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             # Let's do automated type conversion to integer here
             if np.array(item[1]).dtype.kind == 'i':
                 item1 = int(item1)
-            if isinstance(item1, int):
+            if isinstance(item1, (int, np.integer)):
                 start, stop, step = item1, item1 + 1, 1
             else:
                 raise ValueError('Must pass int or slice to __getitem__')
@@ -119,7 +563,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if (step is not None) and (step != 1):
             raise ValueError('step needs to be 1 : %d given' % step)
 
-        if isinstance(sel, int):
+        if isinstance(sel, (int, np.integer)):
             sel = np.array([sel])
 
         if sel is not None and len(sel) == 0:
@@ -131,7 +575,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         """getting raw data content with python slicing"""
         sel, start, stop = self._parse_get_set_params(item)
         if self.preload:
-            data, times = self._data[sel, start:stop], self._times[start:stop]
+            data, times = self._data[sel, start:stop], self.times[start:stop]
         else:
             data, times = self._read_segment(start=start, stop=stop, sel=sel,
                                              projector=self._projector,
@@ -151,12 +595,18 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
     def anonymize(self):
         """Anonymize data
 
-        This function will remove info['subject_info'] if it exists."""
+        This function will remove info['subject_info'] if it exists.
+
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object. Operates in place.
+        """
         self.info._anonymize()
+        return self
 
     @verbose
-    def apply_function(self, fun, picks, dtype, n_jobs, verbose=None, *args,
-                       **kwargs):
+    def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):
         """ Apply a function to a subset of channels.
 
         The function "fun" is applied to the channels defined in "picks". The
@@ -180,25 +630,27 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             A function to be applied to the channels. The first argument of
             fun has to be a timeseries (numpy.ndarray). The function must
             return a numpy.ndarray with the same size as the input.
-        picks : array-like of int
-            Indices of channels to apply the function to.
+        picks : array-like of int | None
+            Indices of channels to apply the function to. If None, all
+            M-EEG channels are used.
         dtype : numpy.dtype
             Data type to use for raw data after applying the function. If None
             the data type is not modified.
         n_jobs : int
             Number of jobs to run in parallel.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-            Defaults to self.verbose.
         *args :
             Additional positional arguments to pass to fun (first pos. argument
             of fun is the timeseries of a channel).
         **kwargs :
-            Keyword arguments to pass to fun.
+            Keyword arguments to pass to fun. Note that if "verbose" is passed
+            as a member of ``kwargs``, it will be consumed and will override
+            the default mne-python verbose level (see mne.verbose).
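+
+        For example, one could square all data channels in place (assuming
+        ``raw`` was created with ``preload=True``; the helper name is
+        hypothetical)::
+
+            def square(x):
+                return x ** 2
+
+            raw.apply_function(square, picks=None, dtype=None, n_jobs=1)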
         """
         if not self.preload:
             raise RuntimeError('Raw data needs to be preloaded. Use '
                                'preload=True (or string) in the constructor.')
+        if picks is None:
+            picks = pick_types(self.info, meg=True, eeg=True, exclude=[])
 
         if not callable(fun):
             raise ValueError('fun needs to be a function')
@@ -210,17 +662,19 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if n_jobs == 1:
             # modify data inplace to save memory
             for idx in picks:
-                self._data[idx, :] = fun(data_in[idx, :], *args, **kwargs)
+                self._data[idx, :] = _check_fun(fun, data_in[idx, :],
+                                                *args, **kwargs)
         else:
             # use parallel function
-            parallel, p_fun, _ = parallel_func(fun, n_jobs)
-            data_picks_new = parallel(p_fun(data_in[p], *args, **kwargs)
+            parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
+            data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)
                                       for p in picks)
             for pp, p in enumerate(picks):
                 self._data[p, :] = data_picks_new[pp]
 
     @verbose
-    def apply_hilbert(self, picks, envelope=False, n_jobs=1, verbose=None):
+    def apply_hilbert(self, picks, envelope=False, n_jobs=1, n_fft=None,
+                      verbose=None):
         """ Compute analytic signal or envelope for a subset of channels.
 
         If envelope=False, the analytic signal for the channels defined in
@@ -252,6 +706,10 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             Compute the envelope signal of each channel.
         n_jobs : int
             Number of jobs to run in parallel.
+        n_fft : int > self.n_times | None
+            Points to use in the FFT for Hilbert transformation. The signal
+            will be padded with zeros before computing Hilbert, then cut back
+            to original length. If None, n == self.n_times.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
@@ -269,11 +727,21 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         MNE inverse solution, the envelope in source space can be obtained
         by computing the analytic signal in sensor space, applying the MNE
         inverse, and computing the envelope in source space.
+
+        Also note that the n_fft parameter will allow you to pad the signal
+        with zeros before performing the Hilbert transform. This padding
+        is cut off, but it may yield slightly different results
+        (particularly around the edges). Use at your own risk.
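+
+        For instance, to obtain the alpha-band envelope of preloaded data
+        (band edges illustrative, ``picks`` an array of channel indices)::
+
+            raw.filter(8., 12., picks=picks)
+            raw.apply_hilbert(picks, envelope=True)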
         """
-        if envelope:
-            self.apply_function(_envelope, picks, None, n_jobs)
+        n_fft = self.n_times if n_fft is None else n_fft
+        if n_fft < self.n_times:
+            raise ValueError("n_fft must be greater than n_times")
+        if envelope is True:
+            self.apply_function(_my_hilbert, picks, None, n_jobs, n_fft,
+                                envelope=envelope)
         else:
-            self.apply_function(hilbert, picks, np.complex64, n_jobs)
+            self.apply_function(_my_hilbert, picks, np.complex64, n_jobs,
+                                n_fft, envelope=envelope)
 
     @verbose
     def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
@@ -289,16 +757,17 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         l_freq and h_freq are the frequencies below which and above which,
         respectively, to filter out of the data. Thus the uses are:
-            l_freq < h_freq: band-pass filter
-            l_freq > h_freq: band-stop filter
-            l_freq is not None, h_freq is None: low-pass filter
-            l_freq is None, h_freq is not None: high-pass filter
 
-        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
-              additional time points need to be temporarily stored in memory.
+            * ``l_freq < h_freq``: band-pass filter
+            * ``l_freq > h_freq``: band-stop filter
+            * ``l_freq is not None and h_freq is None``: high-pass filter
+            * ``l_freq is None and h_freq is not None``: low-pass filter
+
+        If n_jobs > 1, more memory is required as "len(picks) * n_times"
+        additional time points need to be temporarily stored in memory.
 
-        Note: self.info['lowpass'] and self.info['highpass'] are only updated
-              with picks=None.
+        self.info['lowpass'] and self.info['highpass'] are only updated
+        with picks=None.
 
         Parameters
         ----------
@@ -317,10 +786,15 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             used (faster for long signals). If str, a human-readable time in
             units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
             to the shortest power-of-two length at least that duration.
+            Not used for 'iir' filters.
         l_trans_bandwidth : float
-            Width of the transition band at the low cut-off frequency in Hz.
+            Width of the transition band at the low cut-off frequency in Hz
+            (high pass or cutoff 1 in bandpass). Not used if 'order' is
+            specified in iir_params.
         h_trans_bandwidth : float
-            Width of the transition band at the high cut-off frequency in Hz.
+            Width of the transition band at the high cut-off frequency in Hz
+            (low pass or cutoff 2 in bandpass). Not used if 'order' is
+            specified in iir_params.
         n_jobs : int | str
             Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
             is installed properly, CUDA is initialized, and method='fft'.
@@ -334,6 +808,10 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
+
+        See Also
+        --------
+        mne.Epochs.savgol_filter
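+
+        For example, a 1-40 Hz band-pass on preloaded data (cutoffs
+        illustrative)::
+
+            raw.filter(1., 40.)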
         """
         if verbose is None:
             verbose = self.verbose
@@ -364,31 +842,36 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
             # update info if filter is applied to all data channels,
             # and it's not a band-stop filter
-            if h_freq is not None and (l_freq is None or l_freq < h_freq) and \
-                    h_freq < self.info['lowpass']:
-                self.info['lowpass'] = h_freq
-            if l_freq is not None and (h_freq is None or l_freq < h_freq) and \
-                    l_freq > self.info['highpass']:
-                self.info['highpass'] = l_freq
+            if h_freq is not None:
+                if ((l_freq is None or l_freq < h_freq) and
+                        (self.info['lowpass'] is None or
+                         h_freq < self.info['lowpass'])):
+                    self.info['lowpass'] = h_freq
+            if l_freq is not None:
+                if ((h_freq is None or l_freq < h_freq) and
+                        (self.info['highpass'] is None or
+                         l_freq > self.info['highpass'])):
+                    self.info['highpass'] = l_freq
         if l_freq is None and h_freq is not None:
             logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
             low_pass_filter(self._data, fs, h_freq,
                             filter_length=filter_length,
-                            trans_bandwidth=l_trans_bandwidth, method=method,
+                            trans_bandwidth=h_trans_bandwidth, method=method,
                             iir_params=iir_params, picks=picks, n_jobs=n_jobs,
                             copy=False)
         if l_freq is not None and h_freq is None:
             logger.info('High-pass filtering at %0.2g Hz' % l_freq)
             high_pass_filter(self._data, fs, l_freq,
                              filter_length=filter_length,
-                             trans_bandwidth=h_trans_bandwidth, method=method,
+                             trans_bandwidth=l_trans_bandwidth, method=method,
                              iir_params=iir_params, picks=picks, n_jobs=n_jobs,
                              copy=False)
         if l_freq is not None and h_freq is not None:
             if l_freq < h_freq:
                 logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
                             % (l_freq, h_freq))
-                self._data = band_pass_filter(self._data, fs, l_freq, h_freq,
+                self._data = band_pass_filter(
+                    self._data, fs, l_freq, h_freq,
                     filter_length=filter_length,
                     l_trans_bandwidth=l_trans_bandwidth,
                     h_trans_bandwidth=h_trans_bandwidth,
@@ -397,7 +880,8 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             else:
                 logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
                             % (h_freq, l_freq))
-                self._data = band_stop_filter(self._data, fs, h_freq, l_freq,
+                self._data = band_stop_filter(
+                    self._data, fs, h_freq, l_freq,
                     filter_length=filter_length,
                     l_trans_bandwidth=h_trans_bandwidth,
                     h_trans_bandwidth=l_trans_bandwidth, method=method,
@@ -436,6 +920,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             used (faster for long signals). If str, a human-readable time in
             units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
             to the shortest power-of-two length at least that duration.
+            Not used for 'iir' filters.
         notch_widths : float | array of float | None
             Width of each stop band (centred at each freq in freqs) in Hz.
             If None, freqs / 200 is used.
@@ -495,20 +980,29 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                                   picks=picks, n_jobs=n_jobs, copy=False)
 
     @verbose
-    def resample(self, sfreq, npad=100, window='boxcar',
-                 stim_picks=None, n_jobs=1, verbose=None):
+    def resample(self, sfreq, npad=100, window='boxcar', stim_picks=None,
+                 n_jobs=1, events=None, copy=False, verbose=None):
         """Resample data channels.
 
-        Resamples all channels. The data of the Raw object is modified inplace.
+        Resamples all channels.
 
         The Raw object has to be constructed using preload=True (or string).
 
-        WARNING: The intended purpose of this function is primarily to speed
-        up computations (e.g., projection calculation) when precise timing
-        of events is not required, as downsampling raw data effectively
-        jitters trigger timings. It is generally recommended not to epoch
-        downsampled data, but instead epoch and then downsample, as epoching
-        downsampled data jitters triggers.
+        .. warning:: The intended purpose of this function is primarily to
+                     speed up computations (e.g., projection calculation) when
+                     precise timing of events is not required, as downsampling
+                     raw data effectively jitters trigger timings. It is
+                     generally recommended not to epoch downsampled data,
+                     but instead epoch and then downsample, as epoching
+                     downsampled data jitters triggers.
+                     See here for an example:
+
+                         https://gist.github.com/Eric89GXL/01642cb3789992fbca59
+
+                     If resampling the continuous data is desired, it is
+                     recommended to construct events using the original data.
+                     The event onsets can be jointly resampled with the raw
+                     data using the 'events' parameter.
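+
+                     A sketch of that pattern (sampling rate illustrative)::
+
+                         events = mne.find_events(raw)
+                         raw, events = raw.resample(100., events=events)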
 
         Parameters
         ----------
@@ -527,10 +1021,21 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         n_jobs : int | str
             Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
             is installed properly and CUDA is initialized.
+        events : 2D array, shape (n_events, 3) | None
+            An optional event matrix. When specified, the onsets of the events
+            are resampled jointly with the data.
+        copy : bool
+            Whether to operate on a copy of the data (True) or modify data
+            in-place (False). Defaults to False.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
             Defaults to self.verbose.
 
+        Returns
+        -------
+        raw : instance of Raw
+            The resampled version of the raw object.
+
         Notes
         -----
         For some data, it may be more accurate to use npad=0 to reduce
@@ -538,48 +1043,83 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         """
         if not self.preload:
             raise RuntimeError('Can only resample preloaded data')
+
+        inst = self.copy() if copy else self
+
+        # When no event object is supplied, some basic detection of dropped
+        # events is performed to generate a warning. Finding events can fail
+        # for a variety of reasons, e.g. if no stim channel is present or it is
+        # corrupted. This should not stop the resampling from working. The
+        # warning should simply not be generated in this case.
+        if events is None:
+            try:
+                original_events = find_events(inst)
+            except Exception:
+                pass
+
         sfreq = float(sfreq)
-        o_sfreq = float(self.info['sfreq'])
+        o_sfreq = float(inst.info['sfreq'])
 
-        offsets = np.concatenate(([0], np.cumsum(self._raw_lengths)))
+        offsets = np.concatenate(([0], np.cumsum(inst._raw_lengths)))
         new_data = list()
+
+        ratio = sfreq / o_sfreq
+
         # set up stim channel processing
         if stim_picks is None:
-            stim_picks = pick_types(self.info, meg=False, ref_meg=False,
+            stim_picks = pick_types(inst.info, meg=False, ref_meg=False,
                                     stim=True, exclude=[])
         stim_picks = np.asanyarray(stim_picks)
-        ratio = sfreq / o_sfreq
-        for ri in range(len(self._raw_lengths)):
-            data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]]
+
+        for ri in range(len(inst._raw_lengths)):
+            data_chunk = inst._data[:, offsets[ri]:offsets[ri + 1]]
             new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
                                      n_jobs=n_jobs))
             new_ntimes = new_data[ri].shape[1]
 
-            # Now deal with the stim channels. In empirical testing, it was
-            # faster to resample all channels (above) and then replace the
-            # stim channels than it was to only resample the proper subset
-            # of channels and then use np.insert() to restore the stims
-
-            # figure out which points in old data to subsample
-            # protect against out-of-bounds, which can happen (having
-            # one sample more than expected) due to padding
-            stim_inds = np.minimum(np.floor(np.arange(new_ntimes)
-                                            / ratio).astype(int),
-                                   data_chunk.shape[1] - 1)
-            for sp in stim_picks:
-                new_data[ri][sp] = data_chunk[[sp]][:, stim_inds]
-
-            self._first_samps[ri] = int(self._first_samps[ri] * ratio)
-            self._last_samps[ri] = self._first_samps[ri] + new_ntimes - 1
-            self._raw_lengths[ri] = new_ntimes
-
-        # adjust affected variables
-        self._data = np.concatenate(new_data, axis=1)
-        self.first_samp = self._first_samps[0]
-        self.last_samp = self.first_samp + self._data.shape[1] - 1
-        self.info['sfreq'] = sfreq
-        self._times = (np.arange(self.n_times, dtype=np.float64)
-                       / self.info['sfreq'])
+            # In empirical testing, it was faster to resample all channels
+            # (above) and then replace the stim channels than it was to only
+            # resample the proper subset of channels and then use np.insert()
+            # to restore the stims.
+            if len(stim_picks) > 0:
+                stim_resampled = _resample_stim_channels(
+                    data_chunk[stim_picks], new_data[ri].shape[1],
+                    data_chunk.shape[1])
+                new_data[ri][stim_picks] = stim_resampled
+
+            inst._first_samps[ri] = int(inst._first_samps[ri] * ratio)
+            inst._last_samps[ri] = inst._first_samps[ri] + new_ntimes - 1
+            inst._raw_lengths[ri] = new_ntimes
+
+        inst._data = np.concatenate(new_data, axis=1)
+        inst.info['sfreq'] = sfreq
+        inst._update_times()
+
+        # See the comment above why we ignore all errors here.
+        if events is None:
+            try:
+                # Did we lose any events?
+                resampled_events = find_events(inst)
+                if len(resampled_events) != len(original_events):
+                    warnings.warn(
+                        'Resampling of the stim channels caused event '
+                        'information to become unreliable. Consider finding '
+                        'events on the original data and passing the event '
+                        'matrix as a parameter.'
+                    )
+            except Exception:
+                pass
+
+            return inst
+        else:
+            if copy:
+                events = events.copy()
+
+            events[:, 0] = np.minimum(
+                np.round(events[:, 0] * ratio).astype(int),
+                inst._data.shape[1]
+            )
+            return inst, events
 
     def crop(self, tmin=0.0, tmax=None, copy=True):
         """Crop raw data file.
@@ -593,9 +1133,9 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         Parameters
         ----------
         tmin : float
-            New start time (must be >= 0).
+            New start time in seconds (must be >= 0).
         tmax : float | None
-            New end time of the data (cannot exceed data duration).
+            New end time in seconds of the data (cannot exceed data duration).
         copy : bool
             If False Raw is cropped in place.
 
@@ -617,8 +1157,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             raise ValueError('tmax must be less than or equal to the max raw '
                              'time (%0.4f sec)' % max_time)
 
-        smin = raw.time_as_index(tmin)[0]
-        smax = raw.time_as_index(tmax)[0]
+        smin, smax = np.where(_time_mask(self.times, tmin, tmax))[0][[0, -1]]
         cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
                                                    dtype='int')))
         cumul_lens = np.cumsum(cumul_lens)
@@ -630,19 +1169,19 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         raw._first_samps[0] += smin - cumul_lens[keepers[0]]
         raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
         raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
-        raw._raw_lengths = raw._last_samps - raw._first_samps + 1
-        raw.rawdirs = [r for ri, r in enumerate(raw.rawdirs)
-                       if ri in keepers]
-        raw.first_samp = raw._first_samps[0]
-        raw.last_samp = raw.first_samp + (smax - smin)
+        raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)
+                           if ri in keepers]
+        raw._filenames = [r for ri, r in enumerate(raw._filenames)
+                          if ri in keepers]
         if raw.preload:
-            raw._data = raw._data[:, smin:smax + 1]
-            raw._times = np.arange(raw.n_times) / raw.info['sfreq']
+            # slice and copy to avoid the reference to large array
+            raw._data = raw._data[:, smin:smax + 1].copy()
+        raw._update_times()
         return raw
 
     @verbose
     def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
-             drop_small_buffer=False, proj=False, format='single',
+             drop_small_buffer=False, proj=False, fmt='single',
              overwrite=False, split_size='2GB', verbose=None):
         """Save raw data to file
 
@@ -671,11 +1210,11 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             If True the data is saved with the projections applied (active).
             Note: If apply_proj() was used to apply the projections,
             the projections will be active even if proj is False.
-        format : str
+        fmt : str
             Format to use to save raw data. Valid options are 'double',
             'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
-            16-bit integers, respectively. It is STRONGLY recommended to use
-            'single', as this is backward-compatible, and is standard for
+            16-bit integers, respectively. It is **strongly** recommended to
+            use 'single', as this is backward-compatible, and is standard for
             maintaining precision. Note that using 'short' or 'int' may result
             in loss of precision, complex data cannot be saved as 'short',
             and neither complex data types nor real data stored as 'double'
@@ -696,9 +1235,9 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         Notes
         -----
-        If Raw is a concatenation of several raw files, *be warned* that only
-        the measurement information from the first raw file is stored. This
-        likely means that certain operations with external tools may not
+        If Raw is a concatenation of several raw files, **be warned** that
+        only the measurement information from the first raw file is stored.
+        This likely means that certain operations with external tools may not
         work properly on a saved concatenated file (e.g., probably some
         or all forms of SSS). It is recommended not to concatenate and
         then save raw files for this reason.
@@ -707,15 +1246,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                                    'raw.fif.gz', 'raw_sss.fif.gz',
                                    'raw_tsss.fif.gz'))
 
-        if isinstance(split_size, string_types):
-            exp = dict(MB=20, GB=30).get(split_size[-2:], None)
-            if exp is None:
-                raise ValueError('split_size has to end with either'
-                                 '"MB" or "GB"')
-            split_size = int(float(split_size[:-2]) * 2 ** exp)
-
-        if split_size > 2147483648:
-            raise ValueError('split_size cannot be larger than 2GB')
+        split_size = _get_split_size(split_size)
 
         fname = op.realpath(fname)
         if not self.preload and fname in self._filenames:
@@ -731,15 +1262,15 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                          int=FIFF.FIFFT_INT,
                          single=FIFF.FIFFT_FLOAT,
                          double=FIFF.FIFFT_DOUBLE)
-        if not format in type_dict.keys():
-            raise ValueError('format must be "short", "int", "single", '
+        if fmt not in type_dict.keys():
+            raise ValueError('fmt must be "short", "int", "single", '
                              'or "double"')
         reset_dict = dict(short=False, int=False, single=True, double=True)
-        reset_range = reset_dict[format]
-        data_type = type_dict[format]
+        reset_range = reset_dict[fmt]
+        data_type = type_dict[fmt]
 
         data_test = self[0, 0][0]
-        if format == 'short' and np.iscomplexobj(data_test):
+        if fmt == 'short' and np.iscomplexobj(data_test):
             raise ValueError('Complex data must be saved as "single" or '
                              '"double", not "short"')
 
@@ -765,35 +1296,28 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         #
 
         #   Convert to samples
-        start = int(floor(tmin * self.info['sfreq']))
+        start = int(np.floor(tmin * self.info['sfreq']))
 
         if tmax is None:
             stop = self.last_samp + 1 - self.first_samp
         else:
-            stop = int(floor(tmax * self.info['sfreq']))
-
-        if buffer_size_sec is None:
-            if 'buffer_size_sec' in self.info:
-                buffer_size_sec = self.info['buffer_size_sec']
-            else:
-                buffer_size_sec = 10.0
-        buffer_size = int(ceil(buffer_size_sec * self.info['sfreq']))
+            stop = int(np.floor(tmax * self.info['sfreq']))
+        buffer_size = self._get_buffer_size(buffer_size_sec)
 
         # write the raw file
-        _write_raw(fname, self, info, picks, format, data_type, reset_range,
+        _write_raw(fname, self, info, picks, fmt, data_type, reset_range,
                    start, stop, buffer_size, projector, inv_comp,
                    drop_small_buffer, split_size, 0, None)
 
-    def plot(raw, events=None, duration=10.0, start=0.0, n_channels=20,
+    def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
              bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
              event_color='cyan', scalings=None, remove_dc=True, order='type',
-             show_options=False, title=None, show=True, block=False):
+             show_options=False, title=None, show=True, block=False,
+             highpass=None, lowpass=None, filtorder=4, clipping=None):
         """Plot raw data
 
         Parameters
         ----------
-        raw : instance of Raw
-            The raw data to plot.
         events : array | None
             Events to show with vertical bars.
         duration : float
@@ -805,18 +1329,23 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         bgcolor : color object
             Color of the background.
         color : dict | color object | None
-            Color for the data traces. If None, defaults to:
-            `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
-                 ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
+            Color for the data traces. If None, defaults to::
+
+                dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
+                     emg='k', ref_meg='steelblue', misc='k', stim='k',
+                     resp='k', chpi='k')
+
         bad_color : color object
             Color to make bad channels.
         event_color : color object
             Color to use for events.
         scalings : dict | None
-            Scale factors for the traces. If None, defaults to:
-            `dict(mag=1e-12, grad=4e-11, eeg=20e-6,
-                  eog=150e-6, ecg=5e-4, emg=1e-3,
-                  ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+            Scale factors for the traces. If None, defaults to::
+
+                dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                     emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
+                     resp=1, chpi=1e-4)
+
         remove_dc : bool
             If True remove DC component when plotting data.
         order : 'type' | 'original' | array
@@ -833,6 +1362,23 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         block : bool
             Whether to halt program execution until the figure is closed.
             Useful for setting bad channels on the fly (click on line).
+            May not work on all systems / platforms.
+        highpass : float | None
+            Highpass to apply when displaying data.
+        lowpass : float | None
+            Lowpass to apply when displaying data.
+        filtorder : int
+            Filtering order. Note that for efficiency and simplicity,
+            filtering during plotting uses forward-backward IIR filtering,
+            so the effective filter order will be twice ``filtorder``.
+            Filtering the displayed traces may also produce some edge
+            artifacts (at the left and right edges) of the signals.
+            Filtering requires scipy >= 0.10.
+        clipping : str | None
+            If None, channels are allowed to exceed their designated bounds in
+            the plot. If "clamp", then values are clamped to the appropriate
+            range for display, creating step-like artifacts. If "transparent",
+            then excessive values are not shown, creating gaps in the traces.
 
         Returns
         -------
@@ -844,18 +1390,23 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         The arrow keys (up/down/left/right) can typically be used to navigate
         between channels and time ranges, but this depends on the backend
         matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
-        To mark or un-mark a channel as bad, click on the rather flat segments
-        of a channel's time series. The changes will be reflected immediately
-        in the raw object's ``raw.info['bads']`` entry.
+        The scaling can be adjusted with - and + (or =) keys. The viewport
+        dimensions can be adjusted with page up/page down and home/end keys.
+        Full screen mode can be toggled with the f11 key. To mark or un-mark a
+        channel as bad, click on the rather flat segments of a channel's time
+        series. The changes will be reflected immediately in the raw object's
+        ``raw.info['bads']`` entry.
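+
+        A minimal interactive call (settings illustrative)::
+
+            raw.plot(duration=5., n_channels=30, block=True)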
         """
-        return plot_raw(raw, events, duration, start, n_channels, bgcolor,
+        return plot_raw(self, events, duration, start, n_channels, bgcolor,
                         color, bad_color, event_color, scalings, remove_dc,
-                        order, show_options, title, show, block)
+                        order, show_options, title, show, block, highpass,
+                        lowpass, filtorder, clipping)
 
     @verbose
-    def plot_psds(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
-                  proj=False, n_fft=2048, picks=None, ax=None, color='black',
-                  area_mode='std', area_alpha=0.33, n_jobs=1, verbose=None):
+    def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
+                 proj=False, n_fft=2048, picks=None, ax=None,
+                 color='black', area_mode='std', area_alpha=0.33,
+                 n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
         """Plot the power spectral density across channels
 
         Parameters
@@ -887,15 +1438,30 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             calculations. If None, no area will be plotted.
         area_alpha : float
             Alpha for the area.
+        n_overlap : int
+            The number of points of overlap between blocks. The default value
+            is 0 (no overlap).
+        dB : bool
+            If True, transform data to decibels.
+        show : bool
+            Call pyplot.show() at the end.
         n_jobs : int
             Number of jobs to run in parallel.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure with the frequency spectra of the data channels.
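+
+        A minimal call (frequency limits illustrative)::
+
+            fig = raw.plot_psd(tmax=30., fmax=100.)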
         """
-        return plot_raw_psds(self, tmin, tmax, fmin, fmax, proj, n_fft, picks,
-                             ax, color, area_mode, area_alpha, n_jobs)
+        return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
+                            proj=proj, n_fft=n_fft, picks=picks, ax=ax,
+                            color=color, area_mode=area_mode,
+                            area_alpha=area_alpha, n_overlap=n_overlap,
+                            dB=dB, show=show, n_jobs=n_jobs)
 
-    def time_as_index(self, times, use_first_samp=False):
+    def time_as_index(self, times, use_first_samp=False, use_rounding=False):
         """Convert time to indices
 
         Parameters
@@ -905,6 +1471,9 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         use_first_samp : boolean
             If True, time is treated as relative to the session onset, else
             as relative to the recording onset.
+        use_rounding : boolean
+            If True, use rounding (instead of truncation) when converting
+            times to indices. This can help avoid non-unique indices.
 
         Returns
         -------
@@ -912,7 +1481,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             Indices corresponding to the times supplied.
         """
         return _time_as_index(times, self.info['sfreq'], self.first_samp,
-                              use_first_samp)
+                              use_first_samp, use_rounding=use_rounding)
 
     def index_as_time(self, index, use_first_samp=False):
         """Convert indices to time
@@ -934,7 +1503,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
                               use_first_samp)
 
     def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
-                      return_singular=False, picks=None):
+                      return_singular=False, picks=None, scalings='norm'):
         """Estimate rank of the raw data
 
         This function is meant to provide a reasonable estimate of the rank.
@@ -959,6 +1528,17 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         picks : array_like of int, shape (n_selected_channels,)
             The channels to be considered for rank estimation.
             If None (default) meg and eeg channels are included.
+        scalings : dict | 'norm'
+            To achieve reliable rank estimation across multiple sensor
+            types, the channels have to be rescaled. This parameter
+            controls the rescaling. If dict, it will update the
+            following dict of defaults::
+
+                dict(mag=1e11, grad=1e9, eeg=1e5)
+
+            If 'norm', data will be scaled by internally computed
+            channel-wise norms.
+            Defaults to 'norm'.
 
         Returns
         -------
@@ -979,6 +1559,8 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
 
         Bad channels will be excluded from calculations.
         """
+        from ..cov import _estimate_rank_meeg_signals
+
         start = max(0, self.time_as_index(tstart)[0])
         if tstop is None:
             stop = self.n_times - 1
@@ -988,13 +1570,17 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         if picks is None:
             picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
                                exclude='bads')
-
         # ensure we don't get a view of data
         if len(picks) == 1:
             return 1.0, 1.0
         # this should already be a copy, so we can overwrite it
         data = self[picks, tslice][0]
-        return estimate_rank(data, tol, return_singular, copy=False)
+        out = _estimate_rank_meeg_signals(
+            data, pick_info(self.info, picks),
+            scalings=scalings, tol=tol, return_singular=return_singular,
+            copy=False)
+
+        return out
 
     @property
     def ch_names(self):
@@ -1002,6 +1588,11 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         return self.info['ch_names']
 
     @property
+    def times(self):
+        """Time points"""
+        return self._times
+
+    @property
     def n_times(self):
         """Number of time points"""
         return self.last_samp - self.first_samp + 1
@@ -1056,7 +1647,6 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         raws : list, or Raw instance
             list of Raw instances to concatenate to the current instance
             (in order), or a single raw instance to concatenate.
-
         preload : bool, str, or None (default None)
             Preload data into memory for data manipulation and faster indexing.
             If True, the data will be preloaded into memory (fast, requires
@@ -1066,6 +1656,10 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             None, preload=True or False is inferred using the preload status
             of the raw files passed in.
         """
+        from .fiff.raw import RawFIF
+        from .kit.kit import RawKIT
+        from .edf.edf import RawEDF
+
         if not isinstance(raws, list):
             raws = [raws]
 
@@ -1082,10 +1676,12 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             else:
                 preload = False
 
+        if not preload and not isinstance(self, (RawFIF, RawKIT, RawEDF)):
+            raise RuntimeError('preload must be True to concatenate '
+                               'files unless they are FIF, KIT, or EDF')
         if preload is False:
             if self.preload:
                 self._data = None
-                self._times = None
             self.preload = False
         else:
             # do the concatenation ourselves since preload might be a string
@@ -1121,19 +1717,19 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         for r in raws:
             self._first_samps = np.r_[self._first_samps, r._first_samps]
             self._last_samps = np.r_[self._last_samps, r._last_samps]
-            self._raw_lengths = np.r_[self._raw_lengths, r._raw_lengths]
-            self.rawdirs += r.rawdirs
+            self._raw_extras += r._raw_extras
             self._filenames += r._filenames
-        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
+        self._update_times()
 
-        # this has to be done after first and last sample are set appropriately
-        if self.preload:
-            self._times = np.arange(self.n_times) / self.info['sfreq']
+        if not (len(self._first_samps) == len(self._last_samps) ==
+                len(self._raw_extras) == len(self._filenames)):
+            raise RuntimeError('Append error')  # should never happen
 
     def close(self):
         """Clean up the object.
 
-        Does nothing for now.
+        Does nothing by default; subclasses that hold open file
+        descriptors (e.g., RawFIF) override this method.
         """
         pass
 
@@ -1142,128 +1738,14 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         """
         return deepcopy(self)
 
-    def as_data_frame(self, picks=None, start=None, stop=None, scale_time=1e3,
-                      scalings=None, use_time_index=True, copy=True):
-        """Get the epochs as Pandas DataFrame
-
-        Export raw data in tabular structure with MEG channels.
-
-        Caveat! To save memory, depending on selected data size consider
-        setting copy to False.
-
-        Parameters
-        ----------
-        picks : array-like of int | None
-            If None only MEG and EEG channels are kept
-            otherwise the channels indices in picks are kept.
-        start : int | None
-            Data-extraction start index. If None, data will be exported from
-            the first sample.
-        stop : int | None
-            Data-extraction stop index. If None, data will be exported to the
-            last index.
-        scale_time : float
-            Scaling to be applied to time units.
-        scalings : dict | None
-            Scaling to be applied to the channels picked. If None, defaults to
-            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)`.
-        use_time_index : bool
-            If False, times will be included as in the data table, else it will
-            be used as index object.
-        copy : bool
-            If true, data will be copied. Else data may be modified in place.
-
-        Returns
-        -------
-        df : instance of pandas.core.DataFrame
-            Raw data exported into tabular data structure.
-        """
-
-        pd = _check_pandas_installed()
-        if picks is None:
-            picks = list(range(self.info['nchan']))
-
-        data, times = self[picks, start:stop]
-
-        if copy:
-            data = data.copy()
-
-        types = [channel_type(self.info, idx) for idx in picks]
-        n_channel_types = 0
-        ch_types_used = []
-
-        scalings = _mutable_defaults(('scalings', scalings))[0]
-        for t in scalings.keys():
-            if t in types:
-                n_channel_types += 1
-                ch_types_used.append(t)
-
-        for t in ch_types_used:
-            scaling = scalings[t]
-            idx = [picks[i] for i in range(len(picks)) if types[i] == t]
-            if len(idx) > 0:
-                data[idx] *= scaling
-
-        assert times.shape[0] == data.shape[1]
-        col_names = [self.ch_names[k] for k in picks]
-
-        df = pd.DataFrame(data.T, columns=col_names)
-        df.insert(0, 'time', times * scale_time)
-
-        if use_time_index is True:
-            if 'time' in df:
-                df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(record=True):
-                df.set_index('time', inplace=True)
-
-        return df
-
-    def to_nitime(self, picks=None, start=None, stop=None,
-                  use_first_samp=False, copy=True):
-        """ Raw data as nitime TimeSeries
-
-        Parameters
-        ----------
-        picks : array-like of int | None
-            Indices of channels to apply. If None, all channels will be
-            exported.
-        start : int | None
-            Data-extraction start index. If None, data will be exported from
-            the first sample.
-        stop : int | None
-            Data-extraction stop index. If None, data will be exported to the
-            last index.
-        use_first_samp: bool
-            If True, the time returned is relative to the session onset, else
-            relative to the recording onset.
-        copy : bool
-            Whether to copy the raw data or not.
-
-        Returns
-        -------
-        raw_ts : instance of nitime.TimeSeries
-        """
-        try:
-            from nitime import TimeSeries  # to avoid strong dependency
-        except ImportError:
-            raise Exception('the nitime package is missing')
-
-        data, _ = self[picks, start:stop]
-        if copy:
-            data = data.copy()
-
-        start_time = self.index_as_time(start if start else 0, use_first_samp)
-        raw_ts = TimeSeries(data, sampling_rate=self.info['sfreq'],
-                            t0=start_time)
-
-        raw_ts.ch_names = [self.ch_names[k] for k in picks]
-
-        return raw_ts
-
     def __repr__(self):
+        name = self._filenames[0]
+        name = 'None' if name is None else op.basename(name)
+        s = ', '.join(('%r' % name, "n_channels x n_times : %s x %s"
+                       % (len(self.ch_names), self.n_times)))
         s = "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
                                                 self.n_times)
-        return "<Raw  |  %s>" % s
+        return "<%s  |  %s>" % (self.__class__.__name__, s)
 
     def add_events(self, events, stim_channel=None):
         """Add events to stim channel
@@ -1290,7 +1772,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
         events = np.asarray(events)
         if events.ndim != 2 or events.shape[1] != 3:
             raise ValueError('events must be shape (n_events, 3)')
-        stim_channel = _get_stim_channel(stim_channel)
+        stim_channel = _get_stim_channel(stim_channel, self.info)
         pick = pick_channels(self.ch_names, stim_channel)
         if len(pick) == 0:
             raise ValueError('Channel %s not found' % stim_channel)
@@ -1303,59 +1785,14 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin):
             raise ValueError('event sample numbers must be integers')
         self._data[pick, idx - self.first_samp] += events[:, 2]
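
A minimal usage sketch for add_events; the raw object here is assumed to be
preloaded (the method writes into self._data) and the trigger value is
arbitrary:

    import numpy as np
    new_events = np.array([[raw.first_samp + 100, 0, 5]])  # sample, 0, value
    raw.add_events(new_events, stim_channel='STI 014')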
 
-
-def set_eeg_reference(raw, ref_channels, copy=True):
-    """Rereference eeg channels to new reference channel(s).
-
-    If multiple reference channels are specified, they will be averaged.
-
-    Parameters
-    ----------
-    raw : instance of Raw
-        Instance of Raw with eeg channels and reference channel(s).
-
-    ref_channels : list of str
-        The name(s) of the reference channel(s).
-
-    copy : bool
-        Specifies whether instance of Raw will be copied or modified in place.
-
-    Returns
-    -------
-    raw : instance of Raw
-        Instance of Raw with eeg channels rereferenced.
-
-    ref_data : array
-        Array of reference data subtracted from eeg channels.
-    """
-    # Check to see that raw data is preloaded
-    if not raw.preload:
-        raise RuntimeError('Raw data needs to be preloaded. Use '
-                           'preload=True (or string) in the constructor.')
-    # Make sure that reference channels are loaded as list of string
-    if not isinstance(ref_channels, list):
-        raise IOError('Reference channel(s) must be a list of string. '
-                      'If using a single reference channel, enter as '
-                      'a list with one element.')
-    # Find the indices to the reference electrodes
-    ref_idx = [raw.ch_names.index(c) for c in ref_channels]
-
-    # Get the reference array
-    ref_data = raw._data[ref_idx].mean(0)
-
-    # Get the indices to the eeg channels using the pick_types function
-    eeg_idx = pick_types(raw.info, exclude="bads", eeg=True, meg=False,
-                         ref_meg=False)
-
-    # Copy raw data or modify raw data in place
-    if copy:  # copy data
-        raw = raw.copy()
-
-    # Rereference the eeg channels
-    raw._data[eeg_idx] -= ref_data
-
-    # Return rereferenced data and reference array
-    return raw, ref_data
+    def _get_buffer_size(self, buffer_size_sec=None):
+        """Helper to get the buffer size"""
+        if buffer_size_sec is None:
+            if 'buffer_size_sec' in self.info:
+                buffer_size_sec = self.info['buffer_size_sec']
+            else:
+                buffer_size_sec = 10.0
+        return int(np.ceil(buffer_size_sec * self.info['sfreq']))
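
For example, with the default 10 s buffer and an assumed sampling rate of
1000.0 Hz this yields chunks of 10000 samples:

    import numpy as np
    int(np.ceil(10.0 * 1000.0))  # buffer_size_sec * sfreq -> 10000 samples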
 
 
 def _allocate_data(data, data_buffer, data_shape, dtype):
@@ -1370,25 +1807,43 @@ def _allocate_data(data, data_buffer, data_shape, dtype):
     return data
 
 
-def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False):
+def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False,
+                   use_rounding=False):
     """Convert time to indices
 
     Parameters
     ----------
     times : list-like | float | int
         List of numbers or a number representing points in time.
+    sfreq : float | int
+        Sample frequency.
+    first_samp : int
+        Index to use as first time point.
     use_first_samp : boolean
         If True, time is treated as relative to the session onset, else
         as relative to the recording onset.
+    use_rounding : boolean
+        If True, use rounding (instead of truncation) when converting times to
+        indices. This can help avoid non-unique indices.
 
     Returns
     -------
     index : ndarray
         Indices corresponding to the times supplied.
+
+    Notes
+    -----
+    np.round will return the nearest even integer for values exactly between
+    two integers.
     """
     index = np.atleast_1d(times) * sfreq
     index -= (first_samp if use_first_samp else 0)
-    return index.astype(int)
+
+    # Round or truncate time indices
+    if use_rounding:
+        return np.round(index).astype(int)
+    else:
+        return index.astype(int)
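
A quick illustration of the two conversion modes (values chosen only for
demonstration):

    import numpy as np
    np.round([0.5, 1.5, 2.5])         # -> array([0., 2., 2.]): halves to even
    np.array([1.999999]).astype(int)  # -> array([1]): truncation toward zero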
 
 
 def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
@@ -1417,8 +1872,8 @@ class _RawShell():
     def __init__(self):
         self.first_samp = None
         self.last_samp = None
-        self.cals = None
-        self.rawdir = None
+        self._cals = None
+        self._rawdir = None
         self._projector = None
 
     @property
@@ -1428,7 +1883,7 @@ class _RawShell():
 
 ###############################################################################
 # Writing
-def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
+def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
                stop, buffer_size, projector, inv_comp, drop_small_buffer,
                split_size, part_idx, prev_fname):
     """Write raw file with splitting
@@ -1477,8 +1932,8 @@ def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
         if projector is not None:
             data = np.dot(projector, data)
 
-        if ((drop_small_buffer and (first > start)
-             and (len(times) < buffer_size))):
+        if ((drop_small_buffer and (first > start) and
+             (len(times) < buffer_size))):
             logger.info('Skipping data chunk due to small buffer ... '
                         '[done]')
             break
@@ -1487,7 +1942,7 @@ def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
         if pos_prev is None:
             pos_prev = fid.tell()
 
-        _write_raw_buffer(fid, data, cals, format, inv_comp)
+        _write_raw_buffer(fid, data, cals, fmt, inv_comp)
 
         pos = fid.tell()
         this_buff_size_bytes = pos - pos_prev
@@ -1500,7 +1955,8 @@ def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
 
         # Split files if necessary, leave some space for next file info
         if pos >= split_size - this_buff_size_bytes - 2 ** 20:
-            next_fname, next_idx = _write_raw(fname, raw, info, picks, format,
+            next_fname, next_idx = _write_raw(
+                fname, raw, info, picks, fmt,
                 data_type, reset_range, first + buffer_size, stop, buffer_size,
                 projector, inv_comp, drop_small_buffer, split_size,
                 part_idx + 1, use_fname)
@@ -1517,8 +1973,12 @@ def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
         pos_prev = pos
 
     logger.info('Closing %s [done]' % use_fname)
-    _finish_writing_raw(fid)
-
+    if info.get('maxshield', False):
+        end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
+    else:
+        end_block(fid, FIFF.FIFFB_RAW_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+    end_file(fid)
     return use_fname, part_idx
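
The split check above keeps a fixed margin free below split_size so that the
next-file info still fits; a sketch of the arithmetic, with the 2 GB limit
assumed for illustration:

    split_size = 2 ** 31  # assumed maximum file size in bytes
    margin = 2 ** 20      # 1 MiB reserved for next-file tags
    # a new part starts once pos >= split_size - this_buff_size_bytes - margin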
 
 
@@ -1550,6 +2010,11 @@ def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
         calibration factors.
     """
     #
+    #    Measurement info
+    #
+    info = pick_info(info, sel, copy=True)
+
+    #
     #  Create the file and save the essentials
     #
     fid = start_file(name)
@@ -1557,26 +2022,6 @@ def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
     write_id(fid, FIFF.FIFF_BLOCK_ID)
     if info['meas_id'] is not None:
         write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
-    #
-    #    Measurement info
-    #
-    info = copy.deepcopy(info)
-    if sel is not None:
-        info['chs'] = [info['chs'][k] for k in sel]
-        info['nchan'] = len(sel)
-
-        ch_names = [c['ch_name'] for c in info['chs']]  # name of good channels
-        comps = copy.deepcopy(info['comps'])
-        for c in comps:
-            row_idx = [k for k, n in enumerate(c['data']['row_names'])
-                       if n in ch_names]
-            row_names = [c['data']['row_names'][i] for i in row_idx]
-            rowcals = c['rowcals'][row_idx]
-            c['rowcals'] = rowcals
-            c['data']['nrow'] = len(row_names)
-            c['data']['row_names'] = row_names
-            c['data']['data'] = c['data']['data'][row_idx]
-        info['comps'] = comps
 
     cals = []
     for k in range(info['nchan']):
@@ -1593,12 +2038,15 @@ def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
     #
     # Start the raw data
     #
-    start_block(fid, FIFF.FIFFB_RAW_DATA)
+    if info.get('maxshield', False):
+        start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
+    else:
+        start_block(fid, FIFF.FIFFB_RAW_DATA)
 
     return fid, cals
 
 
-def _write_raw_buffer(fid, buf, cals, format, inv_comp):
+def _write_raw_buffer(fid, buf, cals, fmt, inv_comp):
     """Write raw buffer
 
     Parameters
@@ -1609,7 +2057,7 @@ def _write_raw_buffer(fid, buf, cals, format, inv_comp):
         The buffer to write.
     cals : array
         Calibration factors.
-    format : str
+    fmt : str
         'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
         float for each item. This will be doubled for complex datatypes. Note
         that short and int formats cannot be used for complex data.
@@ -1620,22 +2068,22 @@ def _write_raw_buffer(fid, buf, cals, format, inv_comp):
     if buf.shape[0] != len(cals):
         raise ValueError('buffer and calibration sizes do not match')
 
-    if not format in ['short', 'int', 'single', 'double']:
-        raise ValueError('format must be "short", "single", or "double"')
+    if fmt not in ['short', 'int', 'single', 'double']:
+        raise ValueError('fmt must be "short", "single", or "double"')
 
     if np.isrealobj(buf):
-        if format == 'short':
+        if fmt == 'short':
             write_function = write_dau_pack16
-        elif format == 'int':
+        elif fmt == 'int':
             write_function = write_int
-        elif format == 'single':
+        elif fmt == 'single':
             write_function = write_float
         else:
             write_function = write_double
     else:
-        if format == 'single':
+        if fmt == 'single':
             write_function = write_complex64
-        elif format == 'double':
+        elif fmt == 'double':
             write_function = write_complex128
         else:
             raise ValueError('only "single" and "double" supported for '
@@ -1649,28 +2097,40 @@ def _write_raw_buffer(fid, buf, cals, format, inv_comp):
     write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
 
 
-def _finish_writing_raw(fid):
-    """Finish writing raw FIF file
+def _my_hilbert(x, n_fft=None, envelope=False):
+    """ Compute Hilbert transform of signals w/ zero padding.
 
     Parameters
     ----------
-    fid : file descriptor
-        an open raw data file.
-    """
-    end_block(fid, FIFF.FIFFB_RAW_DATA)
-    end_block(fid, FIFF.FIFFB_MEAS)
-    end_file(fid)
+    x : array, shape (n_times)
+        The signal to convert
+    n_fft : int, length > x.shape[-1] | None
+        How much to pad the signal before Hilbert transform.
+        Note that signal will then be cut back to original length.
+    envelope : bool
+        Whether to compute amplitude of the Hilbert transform in order
+        to return the signal envelope.
 
-
-def _envelope(x):
-    """ Compute envelope signal """
-    return np.abs(hilbert(x))
+    Returns
+    -------
+    out : array, shape (n_times)
+        The Hilbert transform of the signal, or the envelope.
+    """
+    from scipy.signal import hilbert
+    n_fft = x.shape[-1] if n_fft is None else n_fft
+    n_x = x.shape[-1]
+    out = hilbert(x, N=n_fft)[:n_x]
+    if envelope is True:
+        out = np.abs(out)
+    return out
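
A minimal check of the envelope path (signal and n_fft are arbitrary; for a
unit-amplitude sine the envelope should be close to 1.0 away from the edges):

    import numpy as np
    t = np.linspace(0., 1., 1000, endpoint=False)
    x = np.sin(2 * np.pi * 10. * t)
    env = _my_hilbert(x, n_fft=1024, envelope=True)  # ~1.0 except near edges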
 
 
 def _check_raw_compatibility(raw):
     """Check to make sure all instances of Raw
     in the input list raw have compatible parameters"""
     for ri in range(1, len(raw)):
+        if not isinstance(raw[ri], type(raw[0])):
+            raise ValueError('raw[%d] type must match' % ri)
         if not raw[ri].info['nchan'] == raw[0].info['nchan']:
             raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
         if not raw[ri].info['bads'] == raw[0].info['bads']:
@@ -1679,14 +2139,14 @@ def _check_raw_compatibility(raw):
             raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
         if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
             raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
-        if not all(raw[ri].cals == raw[0].cals):
-            raise ValueError('raw[%d].cals must match' % ri)
+        if not all(raw[ri]._cals == raw[0]._cals):
+            raise ValueError('raw[%d]._cals must match' % ri)
         if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
             raise ValueError('SSP projectors in raw files must be the same')
-        if not all(proj_equal(p1, p2) for p1, p2 in
+        if not all(_proj_equal(p1, p2) for p1, p2 in
                    zip(raw[0].info['projs'], raw[ri].info['projs'])):
             raise ValueError('SSP projectors in raw files must be the same')
-    if not all([r.orig_format == raw[0].orig_format for r in raw]):
+    if not all(r.orig_format == raw[0].orig_format for r in raw):
         warnings.warn('raw files do not all have the same data format, '
                       'could result in precision mismatch. Setting '
                       'raw.orig_format="unknown"')
@@ -1729,87 +2189,30 @@ def concatenate_raws(raws, preload=None, events_list=None):
         return raws[0], events
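
A hedged usage sketch; raw_a, raw_b and their event arrays are assumed to
exist and to pass _check_raw_compatibility:

    raw_all, events_all = concatenate_raws([raw_a, raw_b],
                                           events_list=[events_a, events_b])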
 
 
-def get_chpi_positions(raw, t_step=None):
-    """Extract head positions
-
-    Note that the raw instance must have CHPI channels recorded.
-
-    Parameters
-    ----------
-    raw : instance of Raw | str
-        Raw instance to extract the head positions from. Can also be a
-        path to a Maxfilter log file (str).
-    t_step : float | None
-        Sampling interval to use when converting data. If None, it will
-        be automatically determined. By default, a sampling interval of
-        1 second is used if processing a raw data. If processing a
-        Maxfilter log file, this must be None because the log file
-        itself will determine the sampling interval.
-
-    Returns
-    -------
-    translation : array
-        A 2-dimensional array of head position vectors (n_time x 3).
-    rotation : array
-        A 3-dimensional array of rotation matrices (n_time x 3 x 3).
-    t : array
-        The time points associated with each position (n_time).
-
-    Notes
-    -----
-    The digitized HPI head frame y is related to the frame position X as:
-
-        Y = np.dot(rotation, X) + translation
-
-    Note that if a Maxfilter log file is being processed, the start time
-    may not use the same reference point as the rest of mne-python (i.e.,
-    it could be referenced relative to raw.first_samp or something else).
-    """
-    if isinstance(raw, _BaseRaw):
-        # for simplicity, we'll sample at 1 sec intervals like maxfilter
-        if t_step is None:
-            t_step = 1.0
-        if not np.isscalar(t_step):
-            raise TypeError('t_step must be a scalar or None')
-        picks = pick_types(raw.info, meg=False, ref_meg=False,
-                           chpi=True, exclude=[])
-        if len(picks) == 0:
-            raise RuntimeError('raw file has no CHPI channels')
-        time_idx = raw.time_as_index(np.arange(0, raw.n_times
-                                               / raw.info['sfreq'], t_step))
-        data = [raw[picks, ti] for ti in time_idx]
-        t = np.array([d[1] for d in data])
-        data = np.array([d[0][:, 0] for d in data])
-        data = np.c_[t, data]
-    else:
-        if not isinstance(raw, string_types):
-            raise TypeError('raw must be an instance of Raw or string')
-        if not op.isfile(raw):
-            raise IOError('File "%s" does not exist' % raw)
-        if t_step is not None:
-            raise ValueError('t_step must be None if processing a log')
-        data = np.loadtxt(raw, skiprows=1)  # first line is header, skip it
-    t = data[:, 0]
-    translation = data[:, 4:7].copy()
-    rotation = _quart_to_rot(data[:, 1:4])
-    return translation, rotation, t
-
-
-def _quart_to_rot(q):
-    """Helper to convert quarternions to rotations"""
-    q0 = np.sqrt(1 - np.sum(q[:, 0:3] ** 2, 1))
-    q1 = q[:, 0]
-    q2 = q[:, 1]
-    q3 = q[:, 2]
-    rotation = np.array((np.c_[(q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 ** 2,
-                                2 * (q1 * q2 - q0 * q3),
-                                2 * (q1 * q3 + q0 * q2))],
-                         np.c_[(2 * (q1 * q2 + q0 * q3),
-                                q0 ** 2 + q2 ** 2 - q1 ** 2 - q3 ** 2,
-                                2 * (q2 * q3 - q0 * q1))],
-                         np.c_[(2 * (q1 * q3 - q0 * q2),
-                                2 * (q2 * q3 + q0 * q1),
-                                q0 ** 2 + q3 ** 2 - q1 ** 2 - q2 ** 2)]
-                         ))
-    rotation = np.swapaxes(rotation, 0, 1).copy()
-    return rotation
+def _check_update_montage(info, montage):
+    """ Helper function for eeg readers to add montage"""
+    if montage is not None:
+        if not isinstance(montage, (str, Montage)):
+            err = ("Montage must be str, None, or instance of Montage. "
+                   "%s was provided" % type(montage))
+            raise TypeError(err)
+        if montage is not None:
+            if isinstance(montage, str):
+                montage = read_montage(montage)
+            _set_montage(info, montage)
+
+            missing_positions = []
+            exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
+                       FIFF.FIFFV_STIM_CH)
+            for ch in info['chs']:
+                if not ch['kind'] in exclude:
+                    if np.unique(ch['loc']).size == 1:
+                        missing_positions.append(ch['ch_name'])
+
+            # raise error if positions are missing
+            if missing_positions:
+                err = ("The following positions are missing from the montage "
+                       "definitions: %s. If those channels lack positions "
+                       "because they are EOG channels use the eog parameter."
+                       % str(missing_positions))
+                raise KeyError(err)
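
A sketch of how a reader is expected to use this helper; the built-in
'standard_1020' montage name is assumed to be available to read_montage:

    from mne.channels import read_montage
    montage = read_montage('standard_1020')
    _check_update_montage(info, montage)  # KeyError if positions are missing
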
diff --git a/mne/io/brainvision/__init__.py b/mne/io/brainvision/__init__.py
index 8d992e4..17a7db2 100644
--- a/mne/io/brainvision/__init__.py
+++ b/mne/io/brainvision/__init__.py
@@ -1,6 +1,6 @@
 """Brainvision module for conversion to FIF"""
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py
index cc33b16..f030d9c 100644
--- a/mne/io/brainvision/brainvision.py
+++ b/mne/io/brainvision/brainvision.py
@@ -1,7 +1,9 @@
+# -*- coding: utf-8 -*-
 """Conversion tool from Brain Vision EEG to FIF"""
 
-# Authors: Teon Brooks <teon at nyu.edu>
+# Authors: Teon Brooks <teon.brooks at gmail.com>
 #          Christian Brodbeck <christianbrodbeck at nyu.edu>
+#          Eric Larson <larson.eric.d at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -12,12 +14,11 @@ import warnings
 
 import numpy as np
 
-from ...coreg import get_ras_to_neuromag_trans, read_elp
-from ...transforms import als_ras_trans, apply_trans
 from ...utils import verbose, logger
 from ..constants import FIFF
-from ..meas_info import Info
-from ..base import _BaseRaw
+from ..meas_info import _empty_info
+from ..base import _BaseRaw, _check_update_montage
+from ..reference import add_reference_channels
 
 from ...externals.six import StringIO, u
 from ...externals.six.moves import configparser
@@ -28,28 +29,38 @@ class RawBrainVision(_BaseRaw):
 
     Parameters
     ----------
-    vdhr_fname : str
+    vhdr_fname : str
         Path to the EEG header file.
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-    elp_names : list | None
-        A list of channel names in the same order as the points in the elp
-        file. Electrode positions should be specified with the same names as
-        in the vhdr file, and fiducials should be specified as "lpa" "nasion",
-        "rpa". ELP positions with other names are ignored. If elp_names is not
-        None and channels are missing, a KeyError is raised.
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the vhdr file.
+        Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes
+        in the vhdr file. Default is ``()``.
     reference : None | str
+        **Deprecated**, use `add_reference_channels` instead.
         Name of the electrode which served as the reference in the recording.
         If a name is provided, a corresponding channel is added and its data
         is set to 0. This is useful for later re-referencing. The name should
-        correspond to a name in elp_names.
-    eog : list of str
-        Names of channels that should be designated EOG channels. Names should
-        correspond to the vhdr file (default: ['HEOGL', 'HEOGR', 'VEOGb']).
+        correspond to a name in the montage. Data must be preloaded.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    response_trig_shift : int | None
+        An integer that will be added to all response triggers when reading
+        events (stimulus triggers will be unaffected). If None, response
+        triggers will be ignored. Default is 0 for backwards compatibility, but
+        typically another value or None will be necessary.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -58,164 +69,58 @@ class RawBrainVision(_BaseRaw):
     mne.io.Raw : Documentation of attributes and methods.
     """
     @verbose
-    def __init__(self, vhdr_fname, elp_fname=None, elp_names=None,
-                 preload=False, reference=None,
-                 eog=['HEOGL', 'HEOGR', 'VEOGb'], ch_names=None, verbose=None):
-        # backwards compatibility
-        if ch_names is not None:
-            if elp_names is not None:
-                err = ("ch_names is a deprecated parameter, don't specify "
-                       "ch_names if elp_names are specified.")
-                raise TypeError(err)
-            msg = "The ch_names parameter is deprecated. Use elp_names."
-            warnings.warn(msg, DeprecationWarning)
-            elp_names = ['nasion', 'lpa', 'rpa', None, None, None, None,
-                         None] + list(ch_names)
-
-        # Preliminary Raw attributes
-        self._events = np.empty((0, 3))
-        self.preload = False
-
+    def __init__(self, vhdr_fname, montage=None,
+                 eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(), reference=None,
+                 scale=1., preload=False, response_trig_shift=0, verbose=None):
         # Channel info and events
-        logger.info('Extracting eeg Parameters from %s...' % vhdr_fname)
+        logger.info('Extracting parameters from %s...' % vhdr_fname)
         vhdr_fname = os.path.abspath(vhdr_fname)
-        self.info, self._eeg_info, events = _get_eeg_info(vhdr_fname,
-                                                          elp_fname, elp_names,
-                                                          reference, eog)
-        self.set_brainvision_events(events)
-        logger.info('Creating Raw.info structure...')
-
-        # Raw attributes
-        self.verbose = verbose
-        self._filenames = list()
-        self._projector = None
-        self.comp = None  # no compensation for EEG
-        self.proj = False
-        self.first_samp = 0
-        with open(self.info['file_id'], 'rb') as f:
+        info, fmt, self._order, events = _get_vhdr_info(
+            vhdr_fname, eog, misc, response_trig_shift, scale)
+        _check_update_montage(info, montage)
+        with open(info['filename'], 'rb') as f:
             f.seek(0, os.SEEK_END)
             n_samples = f.tell()
-        dtype = int(self._eeg_info['dtype'][-1])
-        n_chan = self.info['nchan']
-        self.last_samp = (n_samples // (dtype * (n_chan - 1))) - 1
-        self._reference = reference
-
-        if preload:
-            self.preload = preload
-            logger.info('Reading raw data from %s...' % vhdr_fname)
-            self._data, _ = self._read_segment()
-            assert len(self._data) == self.info['nchan']
-
-            # Add time info
-            self._times = np.arange(self.first_samp, self.last_samp + 1,
-                                    dtype=np.float64)
-            self._times /= self.info['sfreq']
-            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
-                        % (self.first_samp, self.last_samp,
-                           float(self.first_samp) / self.info['sfreq'],
-                           float(self.last_samp) / self.info['sfreq']))
-        logger.info('Ready.')
-
-    def __repr__(self):
-        n_chan = self.info['nchan']
-        data_range = self.last_samp - self.first_samp + 1
-        s = ('%r' % os.path.basename(self.info['file_id']),
-             "n_channels x n_times : %s x %s" % (n_chan, data_range))
-        return "<RawEEG  |  %s>" % ', '.join(s)
-
-    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
-                      projector=None):
-        """Read a chunk of raw data
-
-        Parameters
-        ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
-        sel : array, optional
-            Indices of channels to select.
-        projector : array
-            SSP operator to apply to the data.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-
-        Returns
-        -------
-        data : array, shape (n_channels, n_samples)
-           The data.
-        times : array, shape (n_samples,)
-            returns the time values corresponding to the samples.
-        """
-        if sel is not None:
-            if len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
-                return (666, 666)
-        if projector is not None:
-            raise NotImplementedError('Currently does not handle projections.')
-        if stop is None:
-            stop = self.last_samp + 1
-        elif stop > self.last_samp + 1:
-            stop = self.last_samp + 1
-
-        #  Initial checks
-        start = int(start)
-        stop = int(stop)
-        if start >= stop:
-            raise ValueError('No data in this range')
-
-        # assemble channel information
-        eeg_info = self._eeg_info
-        sfreq = self.info['sfreq']
-        chs = self.info['chs']
-        if self._reference:
-            chs = chs[:-1]
-        if len(self._events):
-            chs = chs[:-1]
-        n_eeg = len(chs)
-        cals = np.atleast_2d([chan_info['cal'] for chan_info in chs])
-        mults = np.atleast_2d([chan_info['unit_mul'] for chan_info in chs])
-
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(sfreq),
-                     (stop - 1) / float(sfreq)))
-
+        dtype_bytes = _fmt_byte_dict[fmt]
+        self.preload = False  # so the event-setting works
+        self.set_brainvision_events(events)
+        last_samps = [(n_samples // (dtype_bytes * (info['nchan'] - 1))) - 1]
+        super(RawBrainVision, self).__init__(
+            info, last_samps=last_samps, filenames=[info['filename']],
+            orig_format=fmt, preload=preload, verbose=verbose)
+
+        # add reference
+        if reference is not None:
+            warnings.warn('reference is deprecated and will be removed in '
+                          'v0.11. Use add_reference_channels instead.')
+            if preload is False:
+                raise ValueError("Preload must be set to True if reference is "
+                                 "specified.")
+            add_reference_channels(self, reference, copy=False)
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
         # read data
-        dtype = np.dtype(eeg_info['dtype'])
-        buffer_size = (stop - start)
-        pointer = start * n_eeg * dtype.itemsize
-        with open(self.info['file_id'], 'rb') as f:
+        n_data_ch = len(self.ch_names) - 1
+        n_times = stop - start + 1
+        pointer = start * n_data_ch * _fmt_byte_dict[self.orig_format]
+        with open(self._filenames[fi], 'rb') as f:
             f.seek(pointer)
             # extract data
-            data = np.fromfile(f, dtype=dtype, count=buffer_size * n_eeg)
-        if eeg_info['data_orientation'] == 'MULTIPLEXED':
-            data = data.reshape((n_eeg, -1), order='F')
-        elif eeg_info['data_orientation'] == 'VECTORIZED':
-            data = data.reshape((n_eeg, -1), order='C')
-
-        gains = cals * mults
-        data = data * gains.T
-
-        # add reference channel and stim channel (if applicable)
-        data_segments = [data]
-        if self._reference:
-            shape = (1, data.shape[1])
-            ref_channel = np.zeros(shape)
-            data_segments.append(ref_channel)
-        if len(self._events):
-            stim_channel = _synthesize_stim_channel(self._events, start, stop)
-            data_segments.append(stim_channel)
-        if len(data_segments) > 1:
-            data = np.vstack(data_segments)
-
-        if sel is not None:
-            data = data[sel]
-
-        logger.info('[done]')
-        times = np.arange(start, stop, dtype=float) / sfreq
-
-        return data, times
+            data_buffer = np.fromfile(
+                f, dtype=_fmt_dtype_dict[self.orig_format],
+                count=n_times * n_data_ch)
+        data_buffer = data_buffer.reshape((n_data_ch, n_times),
+                                          order=self._order)
+
+        data_ = np.empty((n_data_ch + 1, n_times), dtype=np.float64)
+        data_[:-1] = data_buffer  # cast to float64
+        del data_buffer
+        data_[-1] = _synthesize_stim_channel(self._events, start, stop + 1)
+        data_ *= self._cals[:, np.newaxis]
+        data[:, offset:offset + stop - start + 1] = \
+            np.dot(mult, data_) if mult is not None else data_[idx]
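
The byte arithmetic mirrors the constructor: the stim channel is synthesized
on the fly, so only nchan - 1 channels live in the binary file. A sketch with
assumed numbers (file_bytes is hypothetical):

    dtype_bytes = 2  # _fmt_byte_dict['short'], i.e. 16-bit samples
    n_samples = file_bytes // (dtype_bytes * (nchan - 1))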
 
     def get_brainvision_events(self):
         """Retrieve the events associated with the Brain Vision Raw object
@@ -229,7 +134,7 @@ class RawBrainVision(_BaseRaw):
         return self._events.copy()
 
     def set_brainvision_events(self, events):
-        """Set the events (automatically updates the synthesized stim channel)
+        """Set the events and update the synthesized stim channel
 
         Parameters
         ----------
@@ -237,57 +142,26 @@ class RawBrainVision(_BaseRaw):
             Events, each row consisting of an (onset, duration, trigger)
             sequence.
         """
-        events = np.copy(events)
-        if not events.ndim == 2 and events.shape[1] == 3:
+        events = np.array(events, int)
+        if events.ndim != 2 or events.shape[1] != 3:
             raise ValueError("[n_events x 3] shaped array required")
-
-        # update info based on presence of stim channel
-        had_events = bool(len(self._events))
-        has_events = bool(len(events))
-        if had_events and not has_events:  # remove stim channel
-            if self.info['ch_names'][-1] != 'STI 014':
-                err = "Last channel is not stim channel; info was modified"
-                raise RuntimeError(err)
-            self.info['nchan'] -= 1
-            del self.info['ch_names'][-1]
-            del self.info['chs'][-1]
-            if self.preload:
-                self._data = self._data[:-1]
-        elif has_events and not had_events:  # add stim channel
-            idx = len(self.info['chs']) + 1
-            chan_info = {'ch_name': 'STI 014',
-                         'kind': FIFF.FIFFV_STIM_CH,
-                         'coil_type': FIFF.FIFFV_COIL_NONE,
-                         'logno': idx,
-                         'scanno': idx,
-                         'cal': 1,
-                         'range': 1,
-                         'unit_mul':  0,
-                         'unit': FIFF.FIFF_UNIT_NONE,
-                         'eeg_loc': np.zeros(3),
-                         'loc': np.zeros(12)}
-            self.info['nchan'] += 1
-            self.info['ch_names'].append(chan_info['ch_name'])
-            self.info['chs'].append(chan_info)
-            if self.preload:
-                shape = (1, self._data.shape[1])
-                self._data = np.vstack((self._data, np.empty(shape)))
-
         # update events
         self._events = events
-        if has_events and self.preload:
+        if self.preload:
             start = self.first_samp
             stop = self.last_samp + 1
             self._data[-1] = _synthesize_stim_channel(events, start, stop)
 
 
-def _read_vmrk_events(fname):
+def _read_vmrk_events(fname, response_trig_shift=0):
     """Read events from a vmrk file
 
     Parameters
     ----------
     fname : str
         vmrk file to be read.
+    response_trig_shift : int | None
+        Integer to shift response triggers by. None ignores response triggers.
 
     Returns
     -------
@@ -299,9 +173,16 @@ def _read_vmrk_events(fname):
     with open(fname) as fid:
         txt = fid.read()
 
-    start_tag = 'Brain Vision Data Exchange Marker File, Version 1.0'
-    if not txt.startswith(start_tag):
+    header = txt.split('\n')[0].strip()
+    start_tag = 'Brain Vision Data Exchange Marker File'
+    if not header.startswith(start_tag):
         raise ValueError("vmrk file should start with %r" % start_tag)
+    end_tag = 'Version 1.0'
+    if not header.endswith(end_tag):
+        raise ValueError("vmrk file should be %r" % end_tag)
+    if (response_trig_shift is not None and
+            not isinstance(response_trig_shift, int)):
+        raise TypeError("response_trig_shift must be an integer or None")
 
     # extract Marker Infos block
     m = re.search("\[Marker Infos\]", txt)
@@ -317,13 +198,18 @@ def _read_vmrk_events(fname):
     events = []
     for info in items:
         mtype, mdesc, onset, duration = info.split(',')[:4]
-        if mtype == 'Stimulus':
-            trigger = int(re.findall('S\s*?(\d+)', mdesc)[0])
-            onset = int(onset)
-            duration = int(duration)
-            events.append((onset, duration, trigger))
-
-    events = np.array(events)
+        try:
+            trigger = int(re.findall('[A-Za-z]*\s*?(\d+)', mdesc)[0])
+            if mdesc[0].lower() == 's' or response_trig_shift is not None:
+                if mdesc[0].lower() == 'r':
+                    trigger += response_trig_shift
+                onset = int(onset)
+                duration = int(duration)
+                events.append((onset, duration, trigger))
+        except IndexError:
+            pass
+
+    events = np.array(events).reshape(-1, 3)
     return events
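
For instance, against the test.vmrk shipped with the tests (see the marker
file further below), a hedged sketch of the shift behaviour:

    events = _read_vmrk_events('test.vmrk', response_trig_shift=1000)
    # 'Mk11=Stimulus,S254,6620,1,0' -> trigger 254
    # 'Mk10=Response,R255,6000,1,0' -> trigger 1255 (255 + 1000)
    # with response_trig_shift=None, Response markers are skipped entirely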
 
 
@@ -349,7 +235,8 @@ def _synthesize_stim_channel(events, start, stop):
     onset = events[:, 0]
     offset = onset + events[:, 1]
     idx = np.logical_and(onset < stop, offset > start)
-    events = events[idx]
+    if idx.sum() > 0:  # fix for old numpy
+        events = events[idx]
 
     # make onset relative to buffer
     events[:, 0] -= start
@@ -366,100 +253,61 @@ def _synthesize_stim_channel(events, start, stop):
     return stim_channel
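
A small worked example; the expected output assumes the elided body fills
stim_channel[onset:onset + duration] = trigger:

    import numpy as np
    events = np.array([[2, 3, 5]])  # onset, duration, trigger
    _synthesize_stim_channel(events, start=0, stop=8)
    # expected: [0, 0, 5, 5, 5, 0, 0, 0]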
 
 
-def _get_elp_locs(elp_fname, elp_names):
-    """Read a Polhemus ascii file
+_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C')
+_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single')
+_fmt_byte_dict = dict(short=2, int=4, single=4)
+_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4')
+_unit_dict = {'V': 1., u'µV': 1e-6}
 
-    Parameters
-    ----------
-    elp_fname : str
-        Path to head shape file acquired from Polhemus system and saved in
-        ascii format.
-    elp_names : list
-        A list in order of EEG electrodes found in the Polhemus digitizer file.
 
-    Returns
-    -------
-    ch_locs : dict
-        Dictionary whose keys are the names from elp_names and whose values
-        are the coordinates from the elp file transformed to Neuromag space.
-    """
-    coords_orig = read_elp(elp_fname)
-    coords_ras = apply_trans(als_ras_trans, coords_orig)
-    chs_ras = dict(zip(elp_names, coords_ras))
-    nasion = chs_ras['nasion']
-    lpa = chs_ras['lpa']
-    rpa = chs_ras['rpa']
-    trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
-    coords_neuromag = apply_trans(trans, coords_ras)
-    chs_neuromag = dict(zip(elp_names, coords_neuromag))
-    return chs_neuromag
-
-
-def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
+def _get_vhdr_info(vhdr_fname, eog, misc, response_trig_shift, scale):
     """Extracts all the information from the header file.
 
     Parameters
     ----------
     vhdr_fname : str
         Raw EEG header to be read.
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0, 0, 0).
-    elp_names : list | None
-        A list of channel names in the same order as the points in the elp
-        file. Electrode positions should be specified with the same names as
-        in the vhdr file, and fiducials should be specified as "lpa" "nasion",
-        "rpa". ELP positions with other names are ignored. If elp_names is not
-        None and channels are missing, a KeyError is raised.
-    reference : None | str
-        Name of the electrode which served as the reference in the recording.
-        If a name is provided, a corresponding channel is added and its data
-        is set to 0. This is useful for later re-referencing. The name should
-        correspond to a name in elp_names.
     eog : list of str
         Names of channels that should be designated EOG channels. Names should
         correspond to the vhdr file.
+    misc : list of str
+        Names of channels that should be designated MISC channels. Names
+        should correspond to the electrodes in the vhdr file.
+    response_trig_shift : int | None
+        Integer to shift response triggers by. None ignores response triggers.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
 
     Returns
     -------
     info : Info
         The measurement info.
+    fmt : str
+        The data format in the file.
     order : str
         The order of the binary data in the file ('F' for multiplexed,
         'C' for vectorized).
     events : array, shape (n_events, 3)
         Events from the corresponding vmrk file.
     """
+    scale = float(scale)
+    info = _empty_info()
 
-    info = Info()
-    # Some keys to be consistent with FIF measurement info
-    info['meas_id'] = None
-    info['projs'] = []
-    info['comps'] = []
-    info['bads'] = []
-    info['acq_pars'], info['acq_stim'] = None, None
-    info['filename'] = vhdr_fname
-    info['ctf_head_t'] = None
-    info['dev_ctf_t'] = []
-    info['dig'] = None
-    info['dev_head_t'] = None
-    info['proj_id'] = None
-    info['proj_name'] = None
-    info['experimenter'] = None
-    info['description'] = None
-    info['buffer_size_sec'] = 10.
-    info['orig_blocks'] = None
-    info['line_freq'] = None
-    info['subject_info'] = None
-
-    eeg_info = {}
-
+    ext = os.path.splitext(vhdr_fname)[-1]
+    if ext != '.vhdr':
+        raise IOError("The header file must be given to read the data, "
+                      "not the '%s' file." % ext)
     with open(vhdr_fname, 'r') as f:
         # extract the first section to resemble a cfg
         l = f.readline().strip()
         assert l == 'Brain Vision Data Exchange Header File Version 1.0'
         settings = f.read()
 
-    params, settings = settings.split('[Comment]')
+    if settings.find('[Comment]') != -1:
+        params, settings = settings.split('[Comment]')
+    else:
+        params, settings = settings, ''
     cfg = configparser.ConfigParser()
     if hasattr(cfg, 'read_file'):  # newer API
         cfg.read_file(StringIO(params))
@@ -468,53 +316,44 @@ def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
 
     # get sampling info
     # Sampling interval is given in microsec
-    sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
-    sfreq = int(sfreq)
-    n_data_chan = cfg.getint('Common Infos', 'NumberOfChannels')
-    n_eeg_chan = n_data_chan + bool(reference)
+    info['sfreq'] = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
 
     # check binary format
     assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
-    eeg_info['data_orientation'] = cfg.get('Common Infos', 'DataOrientation')
-    if not (eeg_info['data_orientation'] == 'MULTIPLEXED' or
-            eeg_info['data_orientation'] == 'VECTORIZED'):
+    order = cfg.get('Common Infos', 'DataOrientation')
+    if order not in _orientation_dict:
         raise NotImplementedError('Data Orientation %s is not supported'
-                                  % eeg_info['data_orientation'])
-
-    binary_format = cfg.get('Binary Infos', 'BinaryFormat')
-    if binary_format == 'INT_16':
-        eeg_info['dtype'] = '<i2'
-    elif binary_format == 'INT_32':
-        eeg_info['dtype'] = '<i4'
-    elif binary_format == 'IEEE_FLOAT_32':
-        eeg_info['dtype'] = '<f4'
-    else:
-        raise NotImplementedError('Datatype %s is not supported'
-                                  % binary_format)
+                                  % order)
+    order = _orientation_dict[order]
+
+    fmt = cfg.get('Binary Infos', 'BinaryFormat')
+    if fmt not in _fmt_dict:
+        raise NotImplementedError('Datatype %s is not supported' % fmt)
+    fmt = _fmt_dict[fmt]
 
     # load channel labels
-    ch_names = ['UNKNOWN'] * n_eeg_chan
-    cals = np.empty(n_eeg_chan)
-    cals[:] = np.nan
-    units = ['UNKNOWN'] * n_eeg_chan
+    info['nchan'] = cfg.getint('Common Infos', 'NumberOfChannels') + 1
+    ch_names = [''] * info['nchan']
+    cals = np.empty(info['nchan'])
+    ranges = np.empty(info['nchan'])
+    cals.fill(np.nan)
     for chan, props in cfg.items('Channel Infos'):
-        n = int(re.findall(r'ch(\d+)', chan)[0])
-        name, _, resolution, unit = props.split(',')[:4]
-        ch_names[n - 1] = name
-        cals[n - 1] = float(resolution)
+        n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
+        props = props.split(',')
+        if len(props) < 4:
+            props += ('V',)
+        name, _, resolution, unit = props[:4]
+        ch_names[n] = name
+        if resolution == "":  # For truncated vhdrs (e.g. EEGLAB export)
+            resolution = 0.000001
         unit = unit.replace('\xc2', '')  # Remove unwanted control characters
-        if u(unit) == u('\xb5V'):
-            units[n - 1] = 1e-6
-        elif unit == 'V':
-            units[n - 1] = 0
-        else:
-            units[n - 1] = unit
-
-    # add reference channel info
-    if reference:
-        ch_names[-1] = reference
-        cals[-1] = cals[-2]
-        units[-1] = units[-2]
+        cals[n] = float(resolution)
+        ranges[n] = _unit_dict.get(u(unit), unit) * scale
+    ch_names[-1] = 'STI 014'
+    cals[-1] = 1.
+    ranges[-1] = 1.
+    if np.isnan(cals).any():
+        raise RuntimeError('Missing channel units')
 
     # Attempts to extract filtering info from header. If not found, both are
     # set to zero.
@@ -528,12 +367,11 @@ def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
                 break
             else:
                 idx = None
+
     if idx:
         lowpass = []
         highpass = []
-        for i, ch in enumerate(ch_names, 1):
-            if ch == reference:
-                continue
+        for i, ch in enumerate(ch_names[:-1], 1):
             line = settings[idx + i].split()
             assert ch in line
             highpass.append(line[5])
@@ -544,11 +382,11 @@ def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
             if highpass[0] == 'NaN':
                 info['highpass'] = None
             elif highpass[0] == 'DC':
-                info['highpass'] = 0
+                info['highpass'] = 0.
             else:
-                info['highpass'] = int(highpass[0])
+                info['highpass'] = float(highpass[0])
         else:
-            info['highpass'] = np.min(highpass)
+            info['highpass'] = np.min(np.array(highpass, dtype=np.float))
             warnings.warn('%s' % ('Channels contain different highpass '
                                   'filters. Highest filter setting will '
                                   'be stored.'))
@@ -558,127 +396,117 @@ def _get_eeg_info(vhdr_fname, elp_fname, elp_names, reference, eog):
             if lowpass[0] == 'NaN':
                 info['lowpass'] = None
             else:
-                info['lowpass'] = int(lowpass[0])
+                info['lowpass'] = float(lowpass[0])
         else:
-            info['lowpass'] = np.min(lowpass)
+            info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
             warnings.warn('%s' % ('Channels contain different lowpass filters.'
                                   ' Lowest filter setting will be stored.'))
+
+        # Post process highpass and lowpass to take into account units
+        header = settings[idx].split('  ')
+        header = [h for h in header if len(h)]
+        if '[s]' in header[4] and info['highpass'] is not None \
+                and (info['highpass'] > 0):
+            info['highpass'] = 1. / info['highpass']
+        if '[s]' in header[5] and info['lowpass'] is not None:
+            info['lowpass'] = 1. / info['lowpass']
     else:
         info['highpass'] = None
         info['lowpass'] = None
 
     # locate EEG and marker files
     path = os.path.dirname(vhdr_fname)
-    info['file_id'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
-    eeg_info['marker_id'] = os.path.join(path, cfg.get('Common Infos',
-                                                       'MarkerFile'))
+    info['filename'] = os.path.join(path, cfg.get('Common Infos', 'DataFile'))
     info['meas_date'] = int(time.time())
 
     # Creates a list of dicts of eeg channels for raw.info
     logger.info('Setting channel info structure...')
     info['chs'] = []
-    info['nchan'] = n_eeg_chan
     info['ch_names'] = ch_names
-    info['sfreq'] = sfreq
-    if elp_fname and elp_names:
-        ch_locs = _get_elp_locs(elp_fname, elp_names)
-        info['dig'] = [{'r': ch_locs['nasion'],
-                        'ident': FIFF.FIFFV_POINT_NASION,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame':  FIFF.FIFFV_COORD_HEAD},
-                       {'r': ch_locs['lpa'], 'ident': FIFF.FIFFV_POINT_LPA,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD},
-                       {'r': ch_locs['rpa'], 'ident': FIFF.FIFFV_POINT_RPA,
-                        'kind': FIFF.FIFFV_POINT_CARDINAL,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD}]
-    else:
-        ch_locs = None
-
-    missing_positions = []
-    idxs = range(1, len(ch_names) + 1)
-    for idx, ch_name, cal, unit_mul in zip(idxs, ch_names, cals, units):
-        is_eog = ch_name in eog
-        if ch_locs is None:
-            loc = np.zeros(3)
-        elif ch_name in ch_locs:
-            loc = ch_locs[ch_name]
-        else:
-            loc = np.zeros(3)
-            if not is_eog:
-                missing_positions.append(ch_name)
-
-        if is_eog:
+    for idx, ch_name in enumerate(ch_names):
+        if ch_name in eog or idx in eog or idx - info['nchan'] in eog:
             kind = FIFF.FIFFV_EOG_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_V
+        elif ch_name in misc or idx in misc or idx - info['nchan'] in misc:
+            kind = FIFF.FIFFV_MISC_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_V
+        elif ch_name == 'STI 014':
+            kind = FIFF.FIFFV_STIM_CH
+            coil_type = FIFF.FIFFV_COIL_NONE
+            unit = FIFF.FIFF_UNIT_NONE
         else:
             kind = FIFF.FIFFV_EEG_CH
-
-        chan_info = {'ch_name': ch_name,
-                     'coil_type': FIFF.FIFFV_COIL_EEG,
-                     'kind': kind,
-                     'logno': idx,
-                     'scanno': idx,
-                     'cal': cal,
-                     'range': 1.,
-                     'unit_mul': unit_mul,
-                     'unit': FIFF.FIFF_UNIT_V,
-                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
-                     'eeg_loc': loc,
-                     'loc': np.hstack((loc, np.zeros(9)))}
-
-        info['chs'].append(chan_info)
-
-    # raise error if positions are missing
-    if missing_positions:
-        err = ("The following positions are missing from the ELP "
-               "definitions: %s. If those channels lack positions because "
-               "they are EOG channels use the eog "
-               "parameter" % str(missing_positions))
-        raise KeyError(err)
+            coil_type = FIFF.FIFFV_COIL_EEG
+            unit = FIFF.FIFF_UNIT_V
+        info['chs'].append(dict(
+            ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,
+            scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),
+            unit=unit, unit_mul=0.,  # always zero; see MNE manual p. 273
+            coord_frame=FIFF.FIFFV_COORD_HEAD))
 
     # for stim channel
-    events = _read_vmrk_events(eeg_info['marker_id'])
+    marker_id = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))
+    events = _read_vmrk_events(marker_id, response_trig_shift)
+    info._check_consistency()
+    return info, fmt, order, events
 
-    return info, eeg_info, events
 
-
-def read_raw_brainvision(vhdr_fname, elp_fname=None, elp_names=None,
-                         preload=False, reference=None,
-                         eog=['HEOGL', 'HEOGR', 'VEOGb'], ch_names=None,
-                         verbose=None):
+def read_raw_brainvision(vhdr_fname, montage=None,
+                         eog=('HEOGL', 'HEOGR', 'VEOGb'), misc=(),
+                         reference=None, scale=1., preload=False,
+                         response_trig_shift=0, verbose=None):
     """Reader for Brain Vision EEG file
 
     Parameters
     ----------
     vhdr_fname : str
         Path to the EEG header file.
-    elp_fname : str | None
-        Path to the elp file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-    elp_names : list | None
-        A list of channel names in the same order as the points in the elp
-        file. Electrode positions should be specified with the same names as
-        in the vhdr file, and fiducials should be specified as "lpa" "nasion",
-        "rpa". ELP positions with other names are ignored. If elp_names is not
-        None and channels are missing, a KeyError is raised.
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple of str
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the vhdr file.
+        Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.
+    misc : list or tuple of str
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes
+        in the vhdr file. Default is ``()``.
     reference : None | str
+        **Deprecated**, use `add_reference_channels` instead.
         Name of the electrode which served as the reference in the recording.
         If a name is provided, a corresponding channel is added and its data
         is set to 0. This is useful for later re-referencing. The name should
-        correspond to a name in elp_names.
-    eog : list of str
-        Names of channels that should be designated EOG channels. Names should
-        correspond to the vhdr file (default: ['HEOGL', 'HEOGR', 'VEOGb']).
+        correspond to a name in the montage. Data must be preloaded.
+    scale : float
+        The scaling factor for EEG data. Units are in volts. Default scale
+        factor is 1. For microvolts, the scale factor would be 1e-6. This is
+        used when the header file does not specify the scale factor.
+    preload : bool
+        If True, all data are loaded at initialization.
+        If False, data are not read until save.
+    response_trig_shift : int | None
+        An integer that will be added to all response triggers when reading
+        events (stimulus triggers will be unaffected). If None, response
+        triggers will be ignored. Default is 0 for backwards compatibility, but
+        typically another value or None will be necessary.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
+    Returns
+    -------
+    raw : instance of RawBrainVision
+        A Raw object containing BrainVision data.
+
     See Also
     --------
     mne.io.Raw : Documentation of attributes and methods.
     """
-    raw = RawBrainVision(vhdr_fname, elp_fname, elp_names, preload,
-                         reference, eog, ch_names, verbose)
+    raw = RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,
+                         misc=misc, reference=reference, scale=scale,
+                         preload=preload, verbose=verbose,
+                         response_trig_shift=response_trig_shift)
     return raw
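
For orientation, a minimal usage sketch of the new reader signature (the
file names below are hypothetical, not part of this patch):

    import mne

    # electrode positions now come from a montage instead of the removed
    # elp_fname/elp_names pair; response markers (R...) get 1000 added so
    # they stay distinct from stimulus markers (S...)
    raw = mne.io.read_raw_brainvision(
        'subject1.vhdr', montage='subject1.hpts',
        eog=('HEOGL', 'HEOGR', 'VEOGb'), preload=True,
        response_trig_shift=1000)
    events = raw.get_brainvision_events()
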
diff --git a/mne/io/brainvision/tests/data/test.hpts b/mne/io/brainvision/tests/data/test.hpts
new file mode 100644
index 0000000..8d84f17
--- /dev/null
+++ b/mne/io/brainvision/tests/data/test.hpts
@@ -0,0 +1,46 @@
+# Ascii stylus data file created by FastSCAN V4.0.7 on Thu Oct 17 12:30:44 2013
+# raw surface, 86 visible points, bounding box reference
+# x y z
+
+cardinal	nasion    -5.6729  -12.3873  -30.3671
+cardinal	lpa    -37.6782  -10.4957   91.5228
+cardinal	rpa    -131.3127    9.3976  -22.2363
+hpi    1    -30.4493  -11.8450   83.3601
+hpi    2    -122.5353    9.2232  -28.6828
+hpi    3    -6.8518  -47.0697  -37.0829
+hpi    4    7.3744  -50.6297  -12.1376
+hpi    5    -33.4264  -43.7352  -57.7756
+eeg    FP1  3.8676  -77.0439  -13.0212
+eeg    FP2  -31.9297  -70.6852  -57.4881
+eeg    F7  -6.1042  -68.2969   45.4939
+eeg    GND  -26.8874 -108.1869  -29.3948
+eeg    F8   -93.4960  -57.5314  -59.6307
+eeg    FC5  -28.5191  -90.8090   65.3667
+eeg    F3   -20.3574 -115.7971   26.8439
+eeg    Fz   -52.4084 -132.2694  -10.8940
+eeg    F4   -79.6612 -109.5778  -50.2500
+eeg    FC6  -120.4482  -80.1049  -48.4998
+eeg    FC1  -59.5687 -140.7296   28.7939
+eeg    FCz  -79.2198 -141.0449    8.6352
+eeg    FC2  -98.5593 -130.9501  -14.6008
+eeg    CP5  -73.7114  -79.5972  108.4127
+eeg    C3   -64.2139 -118.9901   81.5907
+eeg    Cz   -109.4613 -144.7746   38.7691
+eeg    C4   -144.6454 -113.6235  -15.1309
+eeg    CP6  -172.6252  -72.4156   -5.0970
+eeg    CP1  -111.8295 -132.7764   88.6002
+eeg    CPz  -133.5129 -127.0993   66.4257
+eeg    CP2  -155.4911 -128.0503   41.9582
+eeg    P7   -87.9713  -42.7048  122.4772
+eeg    P3   -125.8923  -88.7830  113.4730
+eeg    Pz   -159.2922 -104.0640   84.9862
+eeg    P4   -188.8384  -81.4090   45.6836
+eeg    P8   -179.7623  -35.5428   13.4639
+eeg    O1   -134.3199   -7.4196   99.9593
+eeg    POz  -167.0685  -28.2038   84.3689
+eeg    O2   -172.0302   -4.7960   60.4032
+eeg    A1   -65.6105  -14.9387  115.0734
+eeg    ReRef -163.2992    3.5260   -9.0238
+eeg    HL    1.0591    6.2860    4.8814
+eeg    HR   -51.6423   10.2912  -47.8098
+eeg    Vb   -61.2542   29.6360  -43.4039
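
The .hpts fixture above is whitespace-delimited: a point category (cardinal,
hpi, or eeg), an identifier, then x/y/z coordinates. A rough parsing sketch,
for illustration only (the reader itself goes through
mne.channels.read_montage):

    points = []
    with open('test.hpts') as fid:
        for line in fid:
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # skip blanks and comments
            kind, ident, x, y, z = line.split()
            points.append((kind, ident, float(x), float(y), float(z)))
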
diff --git a/mne/io/brainvision/tests/data/test.vmrk b/mne/io/brainvision/tests/data/test.vmrk
index c14d4f3..16eccb9 100755
--- a/mne/io/brainvision/tests/data/test.vmrk
+++ b/mne/io/brainvision/tests/data/test.vmrk
@@ -18,5 +18,6 @@ Mk6=Stimulus,S254,3253,1,0
 Mk7=Stimulus,S255,3263,1,0
 Mk8=Stimulus,S253,4936,1,0
 Mk9=Stimulus,S255,4946,1,0
-Mk10=Stimulus,S254,6620,1,0
-Mk11=Stimulus,S255,6630,1,0
+Mk10=Response,R255,6000,1,0
+Mk11=Stimulus,S254,6620,1,0
+Mk12=Stimulus,S255,6630,1,0
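
The new Mk10 line adds a Response marker (R255) at sample 6000. A tiny
sketch of how the reader treats response codes, mirroring the test
expectations further below:

    def shift_response(code, response_trig_shift=0):
        """Mimic Response-marker handling (sketch, not the actual reader)."""
        if response_trig_shift is None:
            return None  # response markers are dropped entirely
        return code + response_trig_shift

    assert shift_response(255) == 255          # default: unchanged
    assert shift_response(255, 1000) == 1255   # shifted past stimulus codes
    assert shift_response(255, None) is None   # ignored
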
diff --git a/mne/io/brainvision/tests/data/test_highpass.vhdr b/mne/io/brainvision/tests/data/test_highpass.vhdr
new file mode 100755
index 0000000..cda8d8a
--- /dev/null
+++ b/mne/io/brainvision/tests/data/test_highpass.vhdr
@@ -0,0 +1,142 @@
+Brain Vision Data Exchange Header File Version 1.0
+; Data created by the Vision Recorder
+
+[Common Infos]
+Codepage=UTF-8
+DataFile=test.eeg
+MarkerFile=test.vmrk
+DataFormat=BINARY
+; Data orientation: MULTIPLEXED=ch1,pt1, ch2,pt1 ...
+DataOrientation=MULTIPLEXED
+NumberOfChannels=32
+; Sampling interval in microseconds
+SamplingInterval=1000
+
+[Binary Infos]
+BinaryFormat=INT_16
+
+[Channel Infos]
+; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,
+; <Resolution in "Unit">,<Unit>, Future extensions..
+; Fields are delimited by commas, some fields might be omitted (empty).
+; Commas in channel names are coded as "\1".
+Ch1=FP1,,0.5,µV
+Ch2=FP2,,0.5,µV
+Ch3=F3,,0.5,µV
+Ch4=F4,,0.5,µV
+Ch5=C3,,0.5,µV
+Ch6=C4,,0.5,µV
+Ch7=P3,,0.5,µV
+Ch8=P4,,0.5,µV
+Ch9=O1,,0.5,µV
+Ch10=O2,,0.5,µV
+Ch11=F7,,0.5,µV
+Ch12=F8,,0.5,µV
+Ch13=P7,,0.5,µV
+Ch14=P8,,0.5,µV
+Ch15=Fz,,0.5,µV
+Ch16=FCz,,0.5,µV
+Ch17=Cz,,0.5,µV
+Ch18=CPz,,0.5,µV
+Ch19=Pz,,0.5,µV
+Ch20=POz,,0.5,µV
+Ch21=FC1,,0.5,µV
+Ch22=FC2,,0.5,µV
+Ch23=CP1,,0.5,µV
+Ch24=CP2,,0.5,µV
+Ch25=FC5,,0.5,µV
+Ch26=FC6,,0.5,µV
+Ch27=CP5,,0.5,µV
+Ch28=CP6,,0.5,µV
+Ch29=HL,,0.5,µV
+Ch30=HR,,0.5,µV
+Ch31=Vb,,0.5,µV
+Ch32=ReRef,,0.5,µV
+
+[Comment]
+
+A m p l i f i e r  S e t u p
+============================
+Number of channels: 32
+Sampling Rate [Hz]: 1000
+Sampling Interval [µS]: 1000
+
+Channels
+--------
+#     Name      Phys. Chn.    Resolution / Unit   Low Cutoff [s]   High Cutoff [Hz]   Notch [Hz]    Series Res. [kOhm] Gradient         Offset
+1     FP1         1                0.5 µV             10              250              Off                0
+2     FP2         2                0.5 µV             10              250              Off                0
+3     F3          3                0.5 µV             10              250              Off                0
+4     F4          4                0.5 µV             10              250              Off                0
+5     C3          5                0.5 µV             10              250              Off                0
+6     C4          6                0.5 µV             10              250              Off                0
+7     P3          7                0.5 µV             10              250              Off                0
+8     P4          8                0.5 µV             10              250              Off                0
+9     O1          9                0.5 µV             10              250              Off                0
+10    O2          10               0.5 µV             10              250              Off                0
+11    F7          11               0.5 µV             10              250              Off                0
+12    F8          12               0.5 µV             10              250              Off                0
+13    P7          13               0.5 µV             10              250              Off                0
+14    P8          14               0.5 µV             10              250              Off                0
+15    Fz          15               0.5 µV             10              250              Off                0
+16    FCz         16               0.5 µV             10              250              Off                0
+17    Cz          17               0.5 µV             10              250              Off                0
+18    CPz         18               0.5 µV             10              250              Off                0
+19    Pz          19               0.5 µV             10              250              Off                0
+20    POz         20               0.5 µV             10              250              Off                0
+21    FC1         21               0.5 µV             10              250              Off                0
+22    FC2         22               0.5 µV             10              250              Off                0
+23    CP1         23               0.5 µV             10              250              Off                0
+24    CP2         24               0.5 µV             10              250              Off                0
+25    FC5         25               0.5 µV             10              250              Off                0
+26    FC6         26               0.5 µV             10              250              Off                0
+27    CP5         27               0.5 µV             10              250              Off                0
+28    CP6         28               0.5 µV             10              250              Off                0
+29    HL          29               0.5 µV             10              250              Off                0
+30    HR          30               0.5 µV             10              250              Off                0
+31    Vb          31               0.5 µV             10              250              Off                0
+32    ReRef       32               0.5 µV             10              250              Off                0
+
+S o f t w a r e  F i l t e r s
+==============================
+Disabled
+
+
+Data Electrodes Selected Impedance Measurement Range: 0 - 100 kOhm
+Ground Electrode Selected Impedance Measurement Range: 0 - 10 kOhm
+Reference Electrode Selected Impedance Measurement Range: 0 - 10 kOhm
+Impedance [kOhm] at 16:12:27 :
+FP1:        ???
+FP2:        ???
+F3:         ???
+F4:         ???
+C3:         ???
+C4:         ???
+P3:         ???
+P4:         ???
+O1:         ???
+O2:         ???
+F7:         ???
+F8:         ???
+P7:         ???
+P8:         ???
+Fz:         ???
+FCz:        ???
+Cz:         ???
+CPz:        ???
+Pz:         ???
+POz:        ???
+FC1:        ???
+FC2:        ???
+CP1:        ???
+CP2:        ???
+FC5:        ???
+FC6:        ???
+CP5:        ???
+CP6:        ???
+HL:         ???
+HR:         ???
+Vb:         ???
+ReRef:      ???
+Ref:          0
+Gnd:          4
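
Note how the amplifier table in this new fixture maps onto raw.info in the
tests below: the "Low Cutoff" column is a time constant in seconds, from
which the reader derives the highpass edge, while the "High Cutoff" column
is already a frequency (assuming the simple 1/t conversion implied by the
test assertions):

    low_cutoff_s = 10.     # from the Channels table above, in seconds
    high_cutoff_hz = 250.  # already in Hz

    highpass = 1. / low_cutoff_s  # 0.1 Hz, cf. assert_equal below
    lowpass = high_cutoff_hz      # 250.0 Hz
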
diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py
index 3ffbed7..ca338f4 100644
--- a/mne/io/brainvision/tests/test_brainvision.py
+++ b/mne/io/brainvision/tests/test_brainvision.py
@@ -1,46 +1,57 @@
 """Data Equivalence Tests"""
 from __future__ import print_function
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
 import os.path as op
 import inspect
 
-from nose.tools import assert_equal
+from nose.tools import assert_equal, assert_raises, assert_true
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
 
-import mne
-from mne.utils import _TempDir
-from mne import pick_types
+from mne.utils import _TempDir, run_tests_if_main
+from mne import pick_types, concatenate_raws, find_events
 from mne.io.constants import FIFF
-from mne.io import Raw
-from mne.io import read_raw_brainvision
+from mne.io import Raw, read_raw_brainvision
 
 FILE = inspect.getfile(inspect.currentframe())
 data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
 vhdr_path = op.join(data_dir, 'test.vhdr')
-elp_path = op.join(data_dir, 'test_elp.txt')
+vmrk_path = op.join(data_dir, 'test.vmrk')
+vhdr_highpass_path = op.join(data_dir, 'test_highpass.vhdr')
+montage = op.join(data_dir, 'test.hpts')
 eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
-elp_names = ['nasion', 'lpa', 'rpa', None, None, None, None, None,
-             'FP1', 'FP2', 'F7', 'GND', 'F8',
-             'FC5', 'F3', 'Fz', 'F4', 'FC6',
-             'FC1', 'FCz', 'FC2', 'CP5', 'C3',
-             'Cz', 'C4', 'CP6', 'CP1', 'CPz',
-             'CP2', 'P7', 'P3', 'Pz', 'P4',
-             'P8', 'O1', 'POz', 'O2', 'A1',
-             'ReRef', 'HL', 'HR', 'Vb']
-eog = ('HL', 'HR', 'Vb')
+eog = ['HL', 'HR', 'Vb']
 
-tempdir = _TempDir()
+
+def test_brainvision_data_filters():
+    """Test reading raw Brain Vision files
+    """
+    raw = read_raw_brainvision(vhdr_highpass_path, montage, eog=eog,
+                               preload=True)
+    assert_equal(raw.info['highpass'], 0.1)
+    assert_equal(raw.info['lowpass'], 250.)
+    raw.info["lowpass"] = None
+    raw.filter(1, 30)
 
 
 def test_brainvision_data():
     """Test reading raw Brain Vision files
     """
-    raw_py = read_raw_brainvision(vhdr_path, elp_path, elp_names, preload=True)
+    assert_raises(IOError, read_raw_brainvision, vmrk_path)
+    assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,
+                  preload=True, scale="foo")
+    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
+    raw_py.load_data()  # currently does nothing
+    assert_true('RawBrainVision' in repr(raw_py))
+
+    assert_equal(raw_py.info['highpass'], 0.)
+    assert_equal(raw_py.info['lowpass'], 250.)
+
     picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
     data_py, times_py = raw_py[picks]
 
@@ -56,44 +67,88 @@ def test_brainvision_data():
     assert_array_almost_equal(times_py, times_bin)
 
     # Make sure EOG channels are marked correctly
-    raw_py = read_raw_brainvision(vhdr_path, elp_path, elp_names, eog=eog,
+    raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog,
                                   preload=True)
     for ch in raw_py.info['chs']:
         if ch['ch_name'] in eog:
             assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)
-        elif ch['ch_name'] in elp_names:
-            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
         elif ch['ch_name'] == 'STI 014':
             assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)
+        elif ch['ch_name'] in raw_py.info['ch_names']:
+            assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)
         else:
             raise RuntimeError("Unknown Channel: %s" % ch['ch_name'])
 
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
 
 def test_events():
     """Test reading and modifying events"""
-    raw = read_raw_brainvision(vhdr_path, preload=True)
+    tempdir = _TempDir()
 
     # check that events are read and stim channel is synthesized correctly
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
     events = raw.get_brainvision_events()
-    assert_array_equal(events, [[ 487, 1, 253],
-                                [ 497, 1, 255],
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
                                 [1770, 1, 254],
                                 [1780, 1, 255],
                                 [3253, 1, 254],
                                 [3263, 1, 255],
                                 [4936, 1, 253],
                                 [4946, 1, 255],
+                                [6000, 1, 255],
                                 [6620, 1, 254],
                                 [6630, 1, 255]])
 
-    mne_events = mne.find_events(raw, stim_channel='STI 014')
+    # check that events are read and stim channel is synthesized correctly
+    # and response triggers are shifted as requested.
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
+                               response_trig_shift=1000)
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6000, 1, 1255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    # check that events are read and stim channel is synthesized correctly
+    # and response triggers are ignored.
+    raw = read_raw_brainvision(vhdr_path, eog=eog, preload=True,
+                               response_trig_shift=None)
+    events = raw.get_brainvision_events()
+    assert_array_equal(events, [[487, 1, 253],
+                                [497, 1, 255],
+                                [1770, 1, 254],
+                                [1780, 1, 255],
+                                [3253, 1, 254],
+                                [3263, 1, 255],
+                                [4936, 1, 253],
+                                [4946, 1, 255],
+                                [6620, 1, 254],
+                                [6630, 1, 255]])
+
+    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
+                  preload=True, response_trig_shift=0.1)
+    assert_raises(TypeError, read_raw_brainvision, vhdr_path, eog=eog,
+                  preload=True, response_trig_shift=np.nan)
+
+    mne_events = find_events(raw, stim_channel='STI 014')
     assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
 
     # modify events and check that stim channel is updated
     index = events[:, 2] == 255
     events = events[index]
     raw.set_brainvision_events(events)
-    mne_events = mne.find_events(raw, stim_channel='STI 014')
+    mne_events = find_events(raw, stim_channel='STI 014')
     assert_array_equal(events[:, [0, 2]], mne_events[:, [0, 2]])
 
     # remove events
@@ -101,9 +156,11 @@ def test_events():
     ch_name = raw.info['chs'][-2]['ch_name']
     events = np.empty((0, 3))
     raw.set_brainvision_events(events)
-    assert_equal(raw.info['nchan'], nchan - 1)
-    assert_equal(len(raw._data), nchan - 1)
-    assert_equal(raw.info['chs'][-1]['ch_name'], ch_name)
+    assert_equal(raw.info['nchan'], nchan)
+    assert_equal(len(raw._data), nchan)
+    assert_equal(raw.info['chs'][-2]['ch_name'], ch_name)
+    assert_equal(len(find_events(raw, 'STI 014')), 0)
+    assert_allclose(raw[-1][0], 0.)
     fname = op.join(tempdir, 'evt_raw.fif')
     raw.save(fname)
 
@@ -118,7 +175,8 @@ def test_events():
 def test_read_segment():
     """Test writing raw eeg files when preload is False
     """
-    raw1 = read_raw_brainvision(vhdr_path, preload=False)
+    tempdir = _TempDir()
+    raw1 = read_raw_brainvision(vhdr_path, eog=eog, preload=False)
     raw1_file = op.join(tempdir, 'test1-raw.fif')
     raw1.save(raw1_file, overwrite=True)
     raw11 = Raw(raw1_file, preload=True)
@@ -128,7 +186,7 @@ def test_read_segment():
     assert_array_almost_equal(times1, times11)
     assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
 
-    raw2 = read_raw_brainvision(vhdr_path, preload=True)
+    raw2 = read_raw_brainvision(vhdr_path, eog=eog, preload=True)
     raw2_file = op.join(tempdir, 'test2-raw.fif')
     raw2.save(raw2_file, overwrite=True)
     data2, times2 = raw2[:, :]
@@ -141,17 +199,9 @@ def test_read_segment():
 
     # save with buffer size smaller than file
     raw3_file = op.join(tempdir, 'test3-raw.fif')
-    raw3 = read_raw_brainvision(vhdr_path)
+    raw3 = read_raw_brainvision(vhdr_path, eog=eog)
     raw3.save(raw3_file, buffer_size_sec=2)
     raw3 = Raw(raw3_file, preload=True)
     assert_array_equal(raw3._data, raw1._data)
 
-    # add reference channel
-    raw4_file = op.join(tempdir, 'test4-raw.fif')
-    raw4 = read_raw_brainvision(vhdr_path, reference='A1')
-    raw4.save(raw4_file, buffer_size_sec=2)
-    raw4 = Raw(raw4_file, preload=True)
-    ref_idx = raw4.ch_names.index('A1')
-    assert_equal(len(raw4._data), len(raw1._data) + 1)
-    ref_data, _ = raw4[ref_idx]
-    assert_array_equal(ref_data, 0)
+run_tests_if_main()
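
As these tests exercise, BrainVision events can be read, filtered, and
written back, with the synthesized 'STI 014' stim channel kept in sync. A
condensed round-trip sketch (path hypothetical):

    import mne

    raw = mne.io.read_raw_brainvision('subject1.vhdr', preload=True)
    events = raw.get_brainvision_events()   # (n_events, 3) array
    events = events[events[:, 2] == 255]    # keep only trigger code 255
    raw.set_brainvision_events(events)      # stim channel is regenerated
    found = mne.find_events(raw, stim_channel='STI 014')
    assert len(found) == len(events)
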
diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py
index b6e88bb..caa1be4 100644
--- a/mne/io/bti/bti.py
+++ b/mne/io/bti/bti.py
@@ -9,43 +9,91 @@
 
 import os.path as op
 from itertools import count
+
 import numpy as np
 
 from ...utils import logger, verbose, sum_squared
+from ...transforms import (combine_transforms, invert_transform, apply_trans,
+                           Transform)
 from ..constants import FIFF
-from ..base import _BaseRaw
+from .. import _BaseRaw, _coil_trans_to_loc, _loc_to_coil_trans, _empty_info
 from .constants import BTI
 from .read import (read_int32, read_int16, read_str, read_float, read_double,
                    read_transform, read_char, read_int64, read_uint16,
                    read_uint32, read_double_matrix, read_float_matrix,
                    read_int16_matrix)
-from .transforms import (bti_identity_trans, bti_to_vv_trans,
-                         bti_to_vv_coil_trans, inverse_trans, merge_trans)
-from ..meas_info import Info
 from ...externals import six
 
-FIFF_INFO_CHS_FIELDS = ('loc', 'ch_name', 'unit_mul', 'coil_trans',
-                        'coord_frame', 'coil_type', 'range', 'unit', 'cal',
-                        'eeg_loc', 'scanno', 'kind', 'logno')
+FIFF_INFO_CHS_FIELDS = ('loc',
+                        'ch_name', 'unit_mul', 'coord_frame', 'coil_type',
+                        'range', 'unit', 'cal',
+                        'scanno', 'kind', 'logno')
 
 FIFF_INFO_CHS_DEFAULTS = (np.array([0, 0, 0, 1] * 3, dtype='f4'),
-                          None, 0, None, 0, 0, 1.0,
-                          107, 1.0, None, None, 402, None)
+                          None, 0, 0, 0,
+                          1.0, FIFF.FIFF_UNIT_V, 1.0,
+                          None, FIFF.FIFFV_ECG_CH, None)
 
 FIFF_INFO_DIG_FIELDS = ('kind', 'ident', 'r', 'coord_frame')
 FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD)
 
-BTI_WH2500_REF_MAG = ['MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA']
-BTI_WH2500_REF_GRAD = ['GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA']
+BTI_WH2500_REF_MAG = ('MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA')
+BTI_WH2500_REF_GRAD = ('GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA')
 
 dtypes = zip(list(range(1, 5)), ('>i2', '>i4', '>f4', '>f8'))
 DTYPES = dict((i, np.dtype(t)) for i, t in dtypes)
 
-RAW_INFO_FIELDS = ['dev_head_t', 'nchan', 'bads', 'projs', 'dev_ctf_t',
-                   'meas_date', 'meas_id', 'dig', 'sfreq', 'highpass',
-                   'comps', 'chs', 'ch_names', 'file_id',
-                   'lowpass', 'acq_pars', 'acq_stim', 'filename',
-                   'ctf_head_t']
+
+class _bytes_io_mock_context():
+
+    def __init__(self, target):
+        self.target = target
+
+    def __enter__(self):
+        return self.target
+
+    def __exit__(self, type, value, tb):
+        pass
+
+
+def _bti_open(fname, *args, **kwargs):
+    """Handle bytes io"""
+    if isinstance(fname, six.string_types):
+        return open(fname, *args, **kwargs)
+    elif isinstance(fname, six.BytesIO):
+        return _bytes_io_mock_context(fname)
+    else:
+        raise RuntimeError('Cannot mock this.')
+
+
+def _get_bti_dev_t(adjust=0., translation=(0.0, 0.02, 0.11)):
+    """Get the general Magnes3600WH to Neuromag coordinate transform
+
+    Parameters
+    ----------
+    adjust : float | None
+        Degrees to tilt x-axis for sensor frame misalignment.
+        If None, no adjustment will be applied.
+    translation : array-like
+        The translation to place the origin of coordinate system
+        to the center of the head.
+
+    Returns
+    -------
+    m_nm_t : ndarray
+        4 x 4 rotation, translation, scaling matrix.
+    """
+    flip_t = np.array([[0., -1., 0.],
+                       [1., 0., 0.],
+                       [0., 0., 1.]])
+    rad = np.deg2rad(adjust)
+    adjust_t = np.array([[1., 0., 0.],
+                         [0., np.cos(rad), -np.sin(rad)],
+                         [0., np.sin(rad), np.cos(rad)]])
+    m_nm_t = np.eye(4)
+    m_nm_t[:3, :3] = np.dot(flip_t, adjust_t)
+    m_nm_t[:3, 3] = translation
+    return m_nm_t
 
 
 def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
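
The default transform built by _get_bti_dev_t above is a 90-degree rotation
about the z-axis plus a fixed translation; a quick numeric check of what the
code computes (assuming the private helper is importable from
mne.io.bti.bti):

    import numpy as np
    from mne.io.bti.bti import _get_bti_dev_t

    t = _get_bti_dev_t()  # adjust=0. makes adjust_t the identity
    assert np.allclose(t[:3, :3], [[0., -1., 0.],
                                   [1., 0., 0.],
                                   [0., 0., 1.]])
    assert np.allclose(t[:3, 3], [0., 0.02, 0.11])
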
@@ -70,7 +118,7 @@ def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
             name = 'STI 013'
         elif name == 'TRIGGER':
             name = 'STI 014'
-        elif any([name == k for k in eog_ch]):
+        elif any(name == k for k in eog_ch):
             name = 'EOG %3.3d' % six.advance_iterator(eog)
         elif name == ecg_ch:
             name = 'ECG 001'
@@ -92,7 +140,8 @@ def _rename_channels(names, ecg_ch='E31', eog_ch=('E63', 'E64')):
 
 def _read_head_shape(fname):
     """ Helper Function """
-    with open(fname, 'rb') as fid:
+
+    with _bti_open(fname, 'rb') as fid:
         fid.seek(BTI.FILE_HS_N_DIGPOINTS)
         _n_dig_points = read_int32(fid)
         idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3)
@@ -101,7 +150,7 @@ def _read_head_shape(fname):
     return idx_points, dig_points
 
 
-def _convert_head_shape(idx_points, dig_points):
+def _get_ctf_head_to_head_t(idx_points):
     """ Helper function """
 
     fp = idx_points.astype('>f8')
@@ -111,89 +160,97 @@ def _convert_head_shape(idx_points, dig_points):
     dsin = np.sqrt(1. - dcos * dcos)
     dt = dp / np.sqrt(tmp2)
 
-    idx_points_nm = np.ones((len(fp), 3), dtype='>f8')
-    for idx, f in enumerate(fp):
-        idx_points_nm[idx, 0] = dcos * f[0] - dsin * f[1] + dt
-        idx_points_nm[idx, 1] = dsin * f[0] + dcos * f[1]
-        idx_points_nm[idx, 2] = f[2]
+    # do the transformation
+    t = np.array([[dcos, -dsin, 0., dt],
+                  [dsin, dcos, 0., 0.],
+                  [0., 0., 1., 0.],
+                  [0., 0., 0., 1.]])
+    return Transform('ctf_head', 'head', t)
+
 
+def _flip_fiducials(idx_points_nm):
     # adjust order of fiducials to Neuromag
+    # XXX presumably swap LPA and RPA
     idx_points_nm[[1, 2]] = idx_points_nm[[2, 1]]
-
-    t = bti_identity_trans('>f8')
-    t[0, 0] = dcos
-    t[0, 1] = -dsin
-    t[1, 0] = dsin
-    t[1, 1] = dcos
-    t[0, 3] = dt
-
-    dig_points_nm = np.dot(t[BTI.T_ROT_IX], dig_points.T).T
-    dig_points_nm += t[BTI.T_TRANS_IX].T
-
-    return idx_points_nm, dig_points_nm, t
+    return idx_points_nm
 
 
-def _setup_head_shape(fname, use_hpi=True):
+def _process_bti_headshape(fname, convert=True, use_hpi=True):
     """Read index points and dig points from BTi head shape file
 
     Parameters
     ----------
     fname : str
         The absolute path to the head shape file
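+    convert : bool
+        Whether to convert the head shape points to Neuromag coordinates.
+        Defaults to True.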
+    use_hpi : bool
+        Whether to treat additional hpi coils as digitization points or not.
+        If False, hpi coils will be discarded.
 
     Returns
     -------
     dig : list of dicts
         The list of dig point info structures needed for the fiff info
         structure.
-    use_hpi : bool
-        Whether to treat additional hpi coils as digitization points or not.
-        If False, hpi coils will be discarded.
+    t : dict
+        The transformation that was used.
     """
     idx_points, dig_points = _read_head_shape(fname)
-    idx_points, dig_points, t = _convert_head_shape(idx_points, dig_points)
-    all_points = np.r_[idx_points, dig_points].astype('>f4')
+    if convert:
+        ctf_head_t = _get_ctf_head_to_head_t(idx_points)
+    else:
+        ctf_head_t = Transform('ctf_head', 'ctf_head', np.eye(4))
+
+    if dig_points is not None:
+        # dig_points = apply_trans(ctf_head_t['trans'], dig_points)
+        all_points = np.r_[idx_points, dig_points]
+    else:
+        all_points = idx_points
 
-    idx_idents = list(range(1, 4)) + list(range(1, (len(idx_points) + 1) - 3))
+    if convert:
+        all_points = _convert_hs_points(all_points, ctf_head_t)
+
+    dig = _points_to_dig(all_points, len(idx_points), use_hpi)
+    return dig, ctf_head_t
+
+
+def _convert_hs_points(points, t):
+    """convert to Neuromag"""
+    points = apply_trans(t['trans'], points)
+    points = _flip_fiducials(points).astype(np.float32)
+    return points
+
+
+def _points_to_dig(points, n_idx_points, use_hpi):
+    """Put points in info dig structure"""
+    idx_idents = list(range(1, 4)) + list(range(1, (n_idx_points + 1) - 3))
     dig = []
-    for idx in range(all_points.shape[0]):
+    for idx in range(points.shape[0]):
         point_info = dict(zip(FIFF_INFO_DIG_FIELDS, FIFF_INFO_DIG_DEFAULTS))
-        point_info['r'] = all_points[idx]
+        point_info['r'] = points[idx]
         if idx < 3:
             point_info['kind'] = FIFF.FIFFV_POINT_CARDINAL
             point_info['ident'] = idx_idents[idx]
-        if 2 < idx < len(idx_points) and use_hpi:
+        if 2 < idx < n_idx_points and use_hpi:
             point_info['kind'] = FIFF.FIFFV_POINT_HPI
             point_info['ident'] = idx_idents[idx]
         elif idx > 4:
             point_info['kind'] = FIFF.FIFFV_POINT_EXTRA
             point_info['ident'] = (idx + 1) - len(idx_idents)
 
-        if 2 < idx < len(idx_points) and not use_hpi:
+        if 2 < idx < n_idx_points and not use_hpi:
             pass
         else:
             dig += [point_info]
 
-    return dig, t
+    return dig
 
 
-def _convert_coil_trans(coil_trans, bti_trans, bti_to_nm):
+def _convert_coil_trans(coil_trans, dev_ctf_t, bti_dev_t):
     """ Helper Function """
-    t = bti_to_vv_coil_trans(coil_trans, bti_trans, bti_to_nm)
-    loc = np.roll(t.copy().T, 1, 0)[:, :3].flatten()
-
-    return t, loc
-
-
-def _convert_dev_head_t(bti_trans, bti_to_nm, m_h_nm_h):
-    """ Helper Function """
-    nm_to_m_sensor = inverse_trans(bti_identity_trans(), bti_to_nm)
-    nm_sensor_m_head = merge_trans(bti_trans, nm_to_m_sensor)
-
-    nm_dev_head_t = merge_trans(m_h_nm_h, nm_sensor_m_head)
-    nm_dev_head_t[3, :3] = 0.
-
-    return nm_dev_head_t
+    t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t,
+                           'ctf_head', 'meg')
+    t = np.dot(t['trans'], coil_trans)
+    return t
 
 
 def _correct_offset(fid):
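
The rewritten _convert_coil_trans above now leans on mne.transforms instead
of the removed BTi-specific helpers; schematically (identity matrices used
purely for illustration):

    import numpy as np
    from mne.transforms import (Transform, combine_transforms,
                                invert_transform)

    dev_ctf_t = Transform('ctf_meg', 'ctf_head', np.eye(4))
    bti_dev_t = Transform('ctf_meg', 'meg', np.eye(4))

    # compose ctf_head -> ctf_meg -> meg, then apply to a per-coil transform
    t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t,
                           'ctf_head', 'meg')
    coil_trans = np.eye(4)
    new_coil_trans = np.dot(t['trans'], coil_trans)
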
@@ -218,7 +275,8 @@ def _read_config(fname):
         The config blocks found.
 
     """
-    with open(fname, 'rb') as fid:
+
+    with _bti_open(fname, 'rb') as fid:
         cfg = dict()
         cfg['hdr'] = {'version': read_int16(fid),
                       'site_name': read_str(fid, 32),
@@ -449,11 +507,11 @@ def _read_config(fname):
                                                    dta['hdr']['n_dsp']),
                                                   dtype='f4')
                         for n in range(dta['hdr']['n_entries']):
-                            dta['anlg_wts'][d] = read_int16_matrix(fid, 1,
-                                                       dta['hdr']['n_anlg'])
+                            dta['anlg_wts'][d] = read_int16_matrix(
+                                fid, 1, dta['hdr']['n_anlg'])
                             read_int16(fid)
-                            dta['dsp_wts'][d] = read_float_matrix(fid, 1,
-                                                        dta['hdr']['n_dsp'])
+                            dta['dsp_wts'][d] = read_float_matrix(
+                                fid, 1, dta['hdr']['n_dsp'])
 
                         _correct_offset(fid)
 
@@ -481,9 +539,9 @@ def _read_config(fname):
         cfg['chs'] = list()
 
         # prepare reading channels
-        dev_header = lambda x: {'size': read_int32(x),
-                                'checksum': read_int32(x),
-                                'reserved': read_str(x, 32)}
+        def dev_header(x):
+            return dict(size=read_int32(x), checksum=read_int32(x),
+                        reserved=read_str(x, 32))
 
         for channel in range(cfg['hdr']['total_chans']):
             ch = {'name': read_str(fid, 16),
@@ -507,7 +565,7 @@ def _read_config(fname):
                 dev = {'device_info': dev_header(fid),
                        'inductance': read_float(fid),
                        'padding': read_str(fid, 4),
-                       'transform': read_transform(fid),
+                       'transform': _correct_trans(read_transform(fid)),
                        'xform_flag': read_int16(fid),
                        'total_loops': read_int16(fid)}
 
@@ -754,10 +812,9 @@ def _read_ch_config(fid):
     return cfg
 
 
-def _read_bti_header(pdf_fname, config_fname):
-    """ Read bti PDF header
-    """
-    with open(pdf_fname, 'rb') as fid:
+def _read_bti_header_pdf(pdf_fname):
+    """Read header from pdf file"""
+    with _bti_open(pdf_fname, 'rb') as fid:
         fid.seek(-8, 2)
         start = fid.tell()
         header_position = read_int64(fid)
@@ -775,28 +832,28 @@ def _read_bti_header(pdf_fname, config_fname):
         # actual header starts here
         info = {'version': read_int16(fid),
                 'file_type': read_str(fid, 5),
-                'hdr_size': start - header_position,  # add to info for convenience
+                'hdr_size': start - header_position,  # add for convenience
                 'start': start}
 
         fid.seek(1, 1)
 
         info.update({'data_format': read_int16(fid),
-                    'acq_mode': read_int16(fid),
-                    'total_epochs': read_int32(fid),
-                    'input_epochs': read_int32(fid),
-                    'total_events': read_int32(fid),
-                    'total_fixed_events': read_int32(fid),
-                    'sample_period': read_float(fid),
-                    'xaxis_label': read_str(fid, 16),
-                    'total_processes': read_int32(fid),
-                    'total_chans': read_int16(fid)})
+                     'acq_mode': read_int16(fid),
+                     'total_epochs': read_int32(fid),
+                     'input_epochs': read_int32(fid),
+                     'total_events': read_int32(fid),
+                     'total_fixed_events': read_int32(fid),
+                     'sample_period': read_float(fid),
+                     'xaxis_label': read_str(fid, 16),
+                     'total_processes': read_int32(fid),
+                     'total_chans': read_int16(fid)})
 
         fid.seek(2, 1)
         info.update({'checksum': read_int32(fid),
-                    'total_ed_classes': read_int32(fid),
-                    'total_associated_files': read_int16(fid),
-                    'last_file_index': read_int16(fid),
-                    'timestamp': read_int32(fid)})
+                     'total_ed_classes': read_int32(fid),
+                     'total_associated_files': read_int16(fid),
+                     'last_file_index': read_int16(fid),
+                     'timestamp': read_int32(fid)})
 
         fid.seek(20, 1)
         _correct_offset(fid)
@@ -830,50 +887,72 @@ def _read_bti_header(pdf_fname, config_fname):
     info['dtype'] = DTYPES[info['data_format']]
     bps = info['dtype'].itemsize * info['total_chans']
     info['bytes_per_slice'] = bps
+    return info
+
+
+def _read_bti_header(pdf_fname, config_fname, sort_by_ch_name=True):
+    """ Read bti PDF header
+    """
+    info = _read_bti_header_pdf(pdf_fname) if pdf_fname else dict()
 
     cfg = _read_config(config_fname)
     info['bti_transform'] = cfg['transforms']
 
     # augment channel list by according info from config.
     # get channels from config present in PDF
-    chans = info['chs']
-    chans_cfg = [c for c in cfg['chs'] if c['chan_no']
-                 in [c_['chan_no'] for c_ in chans]]
-
-    # check all pdf chanels are present in config
-    match = [c['chan_no'] for c in chans_cfg] == \
-            [c['chan_no'] for c in chans]
-
-    if not match:
-        raise RuntimeError('Could not match raw data channels with'
-                           ' config channels. Some of the channels'
-                           ' found are not described in config.')
+    chans = info.get('chs', None)
+    if chans is not None:
+        chans_cfg = [c for c in cfg['chs'] if c['chan_no']
+                     in [c_['chan_no'] for c_ in chans]]
+
+        # check that all PDF channels are present in config
+        match = [c['chan_no'] for c in chans_cfg] == \
+                [c['chan_no'] for c in chans]
+
+        if not match:
+            raise RuntimeError('Could not match raw data channels with'
+                               ' config channels. Some of the channels'
+                               ' found are not described in config.')
+    else:
+        chans_cfg = cfg['chs']
+        chans = [dict() for d in chans_cfg]
 
     # transfer channel info from config to channel info
     for ch, ch_cfg in zip(chans, chans_cfg):
         ch['upb'] = ch_cfg['units_per_bit']
         ch['gain'] = ch_cfg['gain']
         ch['name'] = ch_cfg['name']
-        ch['coil_trans'] = (ch_cfg['dev'].get('transform', None)
-                            if 'dev' in ch_cfg else None)
-        if info['data_format'] <= 2:
-            ch['cal'] = ch['scale'] * ch['upb'] * (ch['gain'] ** -1)
+        if ch_cfg.get('dev', dict()).get('transform', None) is not None:
+            ch['loc'] = _coil_trans_to_loc(ch_cfg['dev']['transform'])
         else:
-            ch['cal'] = ch['scale'] * ch['gain']
+            ch['loc'] = None
+        if pdf_fname:
+            if info['data_format'] <= 2:  # see DTYPES, implies integer
+                ch['cal'] = ch['scale'] * ch['upb'] / float(ch['gain'])
+            else:  # float
+                ch['cal'] = ch['scale'] * ch['gain']
+        else:
+            ch['scale'] = 1.0
+
+    if sort_by_ch_name:
+        by_index = [(i, d['index']) for i, d in enumerate(chans)]
+        by_index.sort(key=lambda c: c[1])
+        by_index = [idx[0] for idx in by_index]
+        chs = [chans[pos] for pos in by_index]
 
-    by_index = [(i, d['index']) for i, d in enumerate(chans)]
-    by_index.sort(key=lambda c: c[1])
-    by_index = [idx[0] for idx in by_index]
-    info['chs'] = [chans[pos] for pos in by_index]
+        sort_by_name_idx = [(i, d['name']) for i, d in enumerate(chs)]
+        a_chs = [c for c in sort_by_name_idx if c[1].startswith('A')]
+        other_chs = [c for c in sort_by_name_idx if not c[1].startswith('A')]
+        sort_by_name_idx = sorted(
+            a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
 
-    by_name = [(i, d['name']) for i, d in enumerate(info['chs'])]
-    a_chs = filter(lambda c: c[1].startswith('A'), by_name)
-    other_chs = filter(lambda c: not c[1].startswith('A'), by_name)
-    by_name = sorted(a_chs, key=lambda c: int(c[1][1:])) + sorted(other_chs)
+        sort_by_name_idx = [idx[0] for idx in sort_by_name_idx]
 
-    by_name = [idx[0] for idx in by_name]
-    info['chs'] = [chans[pos] for pos in by_name]
-    info['order'] = by_name
+        info['chs'] = [chans[pos] for pos in sort_by_name_idx]
+        info['order'] = sort_by_name_idx
+    else:
+        info['chs'] = chans
+        info['order'] = Ellipsis
 
     # finally add some important fields from the config
     info['e_table'] = cfg['user_blocks'][BTI.UB_B_E_TABLE_USED]
@@ -902,6 +981,7 @@ def _read_data(info, start=None, stop=None):
     -------
     data : ndarray
         The measurement data, a channels x time slices array.
+        The data will be cast to np.float64 for compatibility.
     """
 
     total_slices = info['total_slices']
@@ -913,18 +993,32 @@ def _read_data(info, start=None, stop=None):
     if any([start < 0, stop > total_slices, start >= stop]):
         raise RuntimeError('Invalid data range supplied:'
                            ' %d, %d' % (start, stop))
-
-    with open(info['pdf_fname'], 'rb') as fid:
+    fname = info['pdf_fname']
+    with _bti_open(fname, 'rb') as fid:
         fid.seek(info['bytes_per_slice'] * start, 0)
         cnt = (stop - start) * info['total_chans']
         shape = [stop - start, info['total_chans']]
-        data = np.fromfile(fid, dtype=info['dtype'],
-                           count=cnt).astype('f4').reshape(shape)
+
+        if isinstance(fid, six.BytesIO):
+            data = np.fromstring(fid.getvalue(),
+                                 dtype=info['dtype'], count=cnt)
+        else:
+            data = np.fromfile(fid, dtype=info['dtype'], count=cnt)
+        data = data.astype('f4').reshape(shape)
 
     for ch in info['chs']:
         data[:, ch['index']] *= ch['cal']
 
-    return data[:, info['order']].T
+    return data[:, info['order']].T.astype(np.float64)
+
+
+def _correct_trans(t):
+    """Helper to convert to a transformation matrix"""
+    t = np.array(t, np.float64)
+    t[:3, :3] *= t[3, :3][:, np.newaxis]  # apply scalings
+    t[3, :3] = 0.  # remove them
+    assert t[3, 3] == 1.
+    return t
 
 
 class RawBTi(_BaseRaw):
@@ -932,300 +1026,340 @@ class RawBTi(_BaseRaw):
 
     Parameters
     ----------
-    pdf_fname : str | None
-        absolute path to the processed data file (PDF)
-    config_fname : str | None
-        absolute path to system config file. If None, it is assumed to be in
-        the same directory.
-    head_shape_fname : str
-        absolute path to the head shape file. If None, it is assumed to be in
-        the same directory.
-    rotation_x : float | int | None
-        Degrees to tilt x-axis for sensor frame misalignment.
-        If None, no adjustment will be applied.
-    translation : array-like
+    pdf_fname : str
+        Path to the processed data file (PDF).
+    config_fname : str
+        Path to system config file.
+    head_shape_fname : str | None
+        Path to the head shape file.
+    rotation_x : float
+        Degrees to tilt x-axis for sensor frame misalignment. Ignored
+        if convert is True.
+    translation : array-like, shape (3,)
         The translation to place the origin of coordinate system
-        to the center of the head.
-    ecg_ch: str | None
-      The 4D name of the ECG channel. If None, the channel will be treated
-      as regular EEG channel.
-    eog_ch: tuple of str | None
-      The 4D names of the EOG channels. If None, the channels will be treated
-      as regular EEG channels.
+        to the center of the head. Ignored if convert is True.
+    convert : bool
+        Convert to Neuromag coordinates or not.
+    rename_channels : bool
+        If True, rename the original 4D channel labels to Neuromag-style
+        names; if False, keep the 4D labels. Defaults to True.
+    sort_by_ch_name : bool
+        Reorder channels according to channel label. 4D channels don't have
+        monotonically increasing numbers in their labels. Defaults to True.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be treated
+        as regular EEG channels.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
-
-    Attributes & Methods
-    --------------------
-    See documentation for mne.io.Raw
-
     """
     @verbose
     def __init__(self, pdf_fname, config_fname='config',
-                 head_shape_fname='hs_file', rotation_x=None,
-                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
-                 eog_ch=('E63', 'E64'), verbose=None):
+                 head_shape_fname='hs_file', rotation_x=0.,
+                 translation=(0.0, 0.02, 0.11), convert=True,
+                 rename_channels=True, sort_by_ch_name=True,
+                 ecg_ch='E31', eog_ch=('E63', 'E64'),
+                 verbose=None):
+
+        info, bti_info = _get_bti_info(
+            pdf_fname=pdf_fname, config_fname=config_fname,
+            head_shape_fname=head_shape_fname, rotation_x=rotation_x,
+            translation=translation, convert=convert, ecg_ch=ecg_ch,
+            rename_channels=rename_channels,
+            sort_by_ch_name=sort_by_ch_name, eog_ch=eog_ch)
+        logger.info('Reading raw data from %s...' % pdf_fname)
+        data = _read_data(bti_info)
+        assert len(data) == len(info['ch_names'])
+        self._projector_hashes = [None]
+        self.bti_ch_labels = [c['chan_label'] for c in bti_info['chs']]
+
+        # make Raw repr work if we have a BytesIO as input
+        if isinstance(pdf_fname, six.BytesIO):
+            pdf_fname = repr(pdf_fname)
+
+        super(RawBTi, self).__init__(
+            info, data, filenames=[pdf_fname], verbose=verbose)
+        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
+                    self.first_samp, self.last_samp,
+                    float(self.first_samp) / info['sfreq'],
+                    float(self.last_samp) / info['sfreq']))
+        logger.info('Ready.')
+
 
+def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x,
+                  translation, convert, ecg_ch, eog_ch, rename_channels=True,
+                  sort_by_ch_name=True):
+
+    if pdf_fname is not None and not isinstance(pdf_fname, six.BytesIO):
         if not op.isabs(pdf_fname):
             pdf_fname = op.abspath(pdf_fname)
 
+    if not isinstance(config_fname, six.BytesIO):
         if not op.isabs(config_fname):
-            config_fname = op.join(op.dirname(pdf_fname), config_fname)
+            config_fname = op.abspath(config_fname)
 
         if not op.exists(config_fname):
             raise ValueError('Could not find the config file %s. Please check'
                              ' whether you are in the right directory '
                              'or pass the full name' % config_fname)
 
-        if not op.isabs(head_shape_fname):
-            head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname)
-
-        if not op.exists(head_shape_fname):
-            raise ValueError('Could not find the head_shape file %s. You shoul'
-                             'd check whether you are in the right directory o'
-                             'r pass the full file name.' % head_shape_fname)
-
-        logger.info('Reading 4D PDF file %s...' % pdf_fname)
-        bti_info = _read_bti_header(pdf_fname, config_fname)
-
-         # XXX indx is informed guess. Normally only one transform is stored.
-        dev_ctf_t = bti_info['bti_transform'][0].astype('>f8')
-        bti_to_nm = bti_to_vv_trans(adjust=rotation_x,
-                                    translation=translation, dtype='>f8')
-
-        use_hpi = False  # hard coded, but marked as later option.
-        logger.info('Creating Neuromag info structure ...')
-        info = Info()
-        info['bads'] = []
-        info['meas_id'] = None
-        info['file_id'] = None
-        info['projs'] = list()
-        info['comps'] = list()
+    if head_shape_fname is not None and not isinstance(
+            head_shape_fname, six.BytesIO):
+        orig_name = head_shape_fname
+        if not op.isfile(head_shape_fname):
+            head_shape_fname = op.join(op.dirname(pdf_fname),
+                                       head_shape_fname)
+
+        if not op.isfile(head_shape_fname):
+            raise ValueError('Could not find the head_shape file "%s". '
+                             'You should check whether you are in the '
+                             'right directory or pass the full file name.'
+                             % orig_name)
+
+    logger.info('Reading 4D PDF file %s...' % pdf_fname)
+    bti_info = _read_bti_header(
+        pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name)
+
+    dev_ctf_t = Transform('ctf_meg', 'ctf_head',
+                          _correct_trans(bti_info['bti_transform'][0]))
+
+    # for old backward compatibility and external processing
+    rotation_x = 0. if rotation_x is None else rotation_x
+    if convert:
+        bti_dev_t = _get_bti_dev_t(rotation_x, translation)
+    else:
+        bti_dev_t = np.eye(4)
+    bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t)
+
+    use_hpi = False  # hard coded, but marked as later option.
+    logger.info('Creating Neuromag info structure ...')
+    info = _empty_info()
+    if pdf_fname is not None:
         date = bti_info['processes'][0]['timestamp']
         info['meas_date'] = [date, 0]
         info['sfreq'] = 1e3 / bti_info['sample_period'] * 1e-3
-        info['nchan'] = len(bti_info['chs'])
-
-        # browse processing info for filter specs.
-        hp, lp = 0.0, info['sfreq'] * 0.4  # find better default
-        for proc in bti_info['processes']:
-            if 'filt' in proc['process_type']:
-                for step in proc['processing_steps']:
-                    if 'high_freq' in step:
-                        hp, lp = step['high_freq'], step['low_freq']
-                    elif 'hp' in step['process_type']:
-                        hp = step['freq']
-                    elif 'lp' in step['process_type']:
-                        lp = step['freq']
-
-        info['highpass'] = hp
-        info['lowpass'] = lp
-        info['acq_pars'], info['acq_stim'] = None, None
-        info['filename'] = None
-        chs = []
-
-        ch_names = [ch['name'] for ch in bti_info['chs']]
-        info['ch_names'] = _rename_channels(ch_names)
-        ch_mapping = zip(ch_names, info['ch_names'])
-        logger.info('... Setting channel info structure.')
-        for idx, (chan_4d, chan_vv) in enumerate(ch_mapping):
-            chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
-            chan_info['ch_name'] = chan_vv
-            chan_info['logno'] = idx + BTI.FIFF_LOGNO
-            chan_info['scanno'] = idx + 1
-            chan_info['cal'] = bti_info['chs'][idx]['scale']
-
-            if any([chan_vv.startswith(k) for k in ('MEG', 'RFG', 'RFM')]):
-                t, loc = bti_info['chs'][idx]['coil_trans'], None
-                if t is not None:
-                    t, loc = _convert_coil_trans(t.astype('>f8'), dev_ctf_t,
-                                                 bti_to_nm)
-                    if idx == 1:
+    else:  # for some use case we just want a partial info with channel geom.
+        info['meas_date'] = None
+        info['sfreq'] = None
+        bti_info['processes'] = list()
+    info['nchan'] = len(bti_info['chs'])
+
+    # browse processing info for filter specs.
+    # find better default
+    hp, lp = (0.0, info['sfreq'] * 0.4) if pdf_fname else (None, None)
+    for proc in bti_info['processes']:
+        if 'filt' in proc['process_type']:
+            for step in proc['processing_steps']:
+                if 'high_freq' in step:
+                    hp, lp = step['high_freq'], step['low_freq']
+                elif 'hp' in step['process_type']:
+                    hp = step['freq']
+                elif 'lp' in step['process_type']:
+                    lp = step['freq']
+
+    info['highpass'] = hp
+    info['lowpass'] = lp
+    info['acq_pars'] = info['acq_stim'] = info['hpi_subsystem'] = None
+    info['events'], info['hpi_results'], info['hpi_meas'] = [], [], []
+    chs = []
+
+    bti_ch_names = [ch['name'] for ch in bti_info['chs']]
+    neuromag_ch_names = _rename_channels(
+        bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch)
+    ch_mapping = zip(bti_ch_names, neuromag_ch_names)
+
+    logger.info('... Setting channel info structure.')
+    for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping):
+        chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
+        chan_info['ch_name'] = chan_neuromag if rename_channels else chan_4d
+        chan_info['logno'] = idx + BTI.FIFF_LOGNO
+        chan_info['scanno'] = idx + 1
+        chan_info['cal'] = bti_info['chs'][idx]['scale']
+
+        if any(chan_4d.startswith(k) for k in ('A', 'M', 'G')):
+            loc = bti_info['chs'][idx]['loc']
+            if loc is not None:
+                if convert:
+                    if idx == 0:
                         logger.info('... putting coil transforms in Neuromag '
                                     'coordinates')
-                chan_info['coil_trans'] = t
-                if loc is not None:
-                    chan_info['loc'] = loc.astype('>f4')
-
-            if chan_vv.startswith('MEG'):
-                chan_info['kind'] = FIFF.FIFFV_MEG_CH
-                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
-                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
-                chan_info['unit'] = FIFF.FIFF_UNIT_T
-
-            elif chan_vv.startswith('RFM'):
-                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
-                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
-                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
-                chan_info['unit'] = FIFF.FIFF_UNIT_T
-
-            elif chan_vv.startswith('RFG'):
-                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
-                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
-                chan_info['unit'] = FIFF.FIFF_UNIT_T_M
-                if chan_4d in ('GxxA', 'GyyA'):
-                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
-                elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
-                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
-
-            elif chan_vv.startswith('EEG'):
-                chan_info['kind'] = FIFF.FIFFV_EEG_CH
-                chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
-                chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
-                chan_info['unit'] = FIFF.FIFF_UNIT_V
-
-            elif chan_vv == 'STI 013':
-                chan_info['kind'] = FIFF.FIFFV_RESP_CH
-            elif chan_vv == 'STI 014':
-                chan_info['kind'] = FIFF.FIFFV_STIM_CH
-            elif chan_vv.startswith('EOG'):
-                chan_info['kind'] = FIFF.FIFFV_EOG_CH
-            elif chan_vv == 'ECG 001':
-                chan_info['kind'] = FIFF.FIFFV_ECG_CH
-            elif chan_vv.startswith('EXT'):
-                chan_info['kind'] = FIFF.FIFFV_MISC_CH
-            elif chan_vv.startswith('UTL'):
-                chan_info['kind'] = FIFF.FIFFV_MISC_CH
-
-            chs.append(chan_info)
-
-        info['chs'] = chs
-
+                    t = _loc_to_coil_trans(bti_info['chs'][idx]['loc'])
+                    t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
+                    loc = _coil_trans_to_loc(t)
+            chan_info['loc'] = loc
+
+        # BTI sensors are natively stored in 4D head coords, we believe
+        meg_frame = (FIFF.FIFFV_COORD_DEVICE if convert else
+                     FIFF.FIFFV_MNE_COORD_4D_HEAD)
+        eeg_frame = (FIFF.FIFFV_COORD_HEAD if convert else
+                     FIFF.FIFFV_MNE_COORD_4D_HEAD)
+        if chan_4d.startswith('A'):
+            chan_info['kind'] = FIFF.FIFFV_MEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+        elif chan_4d.startswith('M'):
+            chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+
+        elif chan_4d.startswith('G'):
+            chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+            chan_info['coord_frame'] = meg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_T_M
+            if chan_4d in ('GxxA', 'GyyA'):
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
+            elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF
+
+        elif chan_4d.startswith('EEG'):
+            chan_info['kind'] = FIFF.FIFFV_EEG_CH
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
+            chan_info['coord_frame'] = eeg_frame
+            chan_info['unit'] = FIFF.FIFF_UNIT_V
+
+        elif chan_4d == 'RESPONSE':
+            chan_info['kind'] = FIFF.FIFFV_RESP_CH
+        elif chan_4d == 'TRIGGER':
+            chan_info['kind'] = FIFF.FIFFV_STIM_CH
+        elif chan_4d.startswith('EOG'):
+            chan_info['kind'] = FIFF.FIFFV_EOG_CH
+        elif chan_4d == ecg_ch:
+            chan_info['kind'] = FIFF.FIFFV_ECG_CH
+        elif chan_4d.startswith('X'):
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+        elif chan_4d == 'UACurrent':
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+
+        chs.append(chan_info)
+
+    info['chs'] = chs
+    info['ch_names'] = neuromag_ch_names if rename_channels else bti_ch_names
+
+    if head_shape_fname:
         logger.info('... Reading digitization points from %s' %
                     head_shape_fname)
-        logger.info('... putting digitization points in Neuromag c'
-                    'oordinates')
-        info['dig'], ctf_head_t = _setup_head_shape(head_shape_fname, use_hpi)
-        logger.info('... Computing new device to head transform.')
-        dev_head_t = _convert_dev_head_t(dev_ctf_t, bti_to_nm,
-                                         ctf_head_t)
-
-        info['dev_head_t'] = dict()
-        info['dev_head_t']['from'] = FIFF.FIFFV_COORD_DEVICE
-        info['dev_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
-        info['dev_head_t']['trans'] = dev_head_t
-        info['dev_ctf_t'] = dict()
-        info['dev_ctf_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_DEVICE
-        info['dev_ctf_t']['to'] = FIFF.FIFFV_COORD_HEAD
-        info['dev_ctf_t']['trans'] = dev_ctf_t
-        info['ctf_head_t'] = dict()
-        info['ctf_head_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_HEAD
-        info['ctf_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
-        info['ctf_head_t']['trans'] = ctf_head_t
-        logger.info('Done.')
+        if convert:
+            logger.info('... putting digitization points in Neuromag '
+                        'coordinates')
+        info['dig'], ctf_head_t = _process_bti_headshape(
+            head_shape_fname, convert=convert, use_hpi=use_hpi)
 
-        if False:  # XXX : reminds us to support this as we go
-            # include digital weights from reference channel
-            comps = info['comps'] = list()
-            weights = bti_info['weights']
-            by_name = lambda x: x[1]
-            chn = dict(ch_mapping)
-            columns = [chn[k] for k in weights['dsp_ch_names']]
-            rows = [chn[k] for k in weights['ch_names']]
-            col_order, col_names = zip(*sorted(enumerate(columns),
-                                               key=by_name))
-            row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
-            # for some reason the C code would invert the signs, so we follow.
-            mat = -weights['dsp_wts'][row_order, :][:, col_order]
-            comp_data = dict(data=mat,
-                             col_names=col_names,
-                             row_names=row_names,
-                             nrow=mat.shape[0], ncol=mat.shape[1])
-            comps += [dict(data=comp_data, ctfkind=101,
-                           #  no idea how to calibrate, just ones.
-                           rowcals=np.ones(mat.shape[0], dtype='>f4'),
-                           colcals=np.ones(mat.shape[1], dtype='>f4'),
-                           save_calibrated=0)]
+        logger.info('... Computing new device to head transform.')
+        # DEV->CTF_DEV->CTF_HEAD->HEAD
+        if convert:
+            t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
+                                   'meg', 'ctf_head')
+            dev_head_t = combine_transforms(t, ctf_head_t, 'meg', 'head')
         else:
-            logger.warning('Warning. Currently direct inclusion of 4D weight t'
-                           'ables is not supported. For critical use cases '
-                           '\nplease take into account the MNE command '
-                           '\'mne_create_comp_data\' to include weights as '
-                           'printed out \nby the 4D \'print_table\' routine.')
-
-        # check that the info is complete
-        assert not set(RAW_INFO_FIELDS) - set(info.keys())
-
-        # check nchan is correct
-        assert len(info['ch_names']) == info['nchan']
-
-        cals = np.zeros(info['nchan'])
-        for k in range(info['nchan']):
-            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
-
-        self.verbose = verbose
-        self.cals = cals
-        self.rawdir = None
-        self.proj = None
-        self.comp = None
-        self._filenames = list()
-        self.preload = True
-        self._projector_hashes = [None]
-        self.info = info
-
-        logger.info('Reading raw data from %s...' % pdf_fname)
-        self._data = _read_data(bti_info)
-        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
-        self._raw_lengths = np.array([self._data.shape[1]])
-        self._first_samps = np.array([0])
-        self._last_samps = self._raw_lengths - 1
-        self.rawdirs = [[]]
-
-        assert len(self._data) == len(self.info['ch_names'])
-        self._times = np.arange(self.first_samp,
-                                self.last_samp + 1) / info['sfreq']
-        self._projectors = [None]
-        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' % (
-                    self.first_samp, self.last_samp,
-                    float(self.first_samp) / info['sfreq'],
-                    float(self.last_samp) / info['sfreq']))
-
-        logger.info('Ready.')
+            dev_head_t = Transform('meg', 'head', np.eye(4))
+        logger.info('Done.')
+    else:
+        logger.info('... no headshape file supplied, doing nothing.')
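+        # without a headshape there is nothing to align to; fall back to
+        # identity transforms so the info structure stays complete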
+        dev_head_t = Transform('meg', 'head', np.eye(4))
+        ctf_head_t = Transform('ctf_head', 'head', np.eye(4))
+    info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t,
+                ctf_head_t=ctf_head_t)
+
+    if False:  # XXX : reminds us to support this as we go
+        # include digital weights from reference channel
+        comps = info['comps'] = list()
+        weights = bti_info['weights']
+
+        def by_name(x):
+            return x[1]
+        chn = dict(ch_mapping)
+        columns = [chn[k] for k in weights['dsp_ch_names']]
+        rows = [chn[k] for k in weights['ch_names']]
+        col_order, col_names = zip(*sorted(enumerate(columns),
+                                           key=by_name))
+        row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
+        # for some reason the C code would invert the signs, so we follow.
+        mat = -weights['dsp_wts'][row_order, :][:, col_order]
+        comp_data = dict(data=mat,
+                         col_names=col_names,
+                         row_names=row_names,
+                         nrow=mat.shape[0], ncol=mat.shape[1])
+        comps += [dict(data=comp_data, ctfkind=101,
+                       #  no idea how to calibrate, just ones.
+                       rowcals=np.ones(mat.shape[0], dtype='>f4'),
+                       colcals=np.ones(mat.shape[1], dtype='>f4'),
+                       save_calibrated=0)]
+    else:
+        logger.warning('Warning. Currently direct inclusion of 4D weight '
+                       'tables is not supported. For critical use cases,\n'
+                       'please use the MNE command \'mne_create_comp_data\' '
+                       'to include weights as printed out\nby the 4D '
+                       '\'print_table\' routine.')
+
+    # check that the info is complete
+    info._check_consistency()
+    return info, bti_info
 
 
 @verbose
 def read_raw_bti(pdf_fname, config_fname='config',
-                 head_shape_fname='hs_file', rotation_x=None,
-                 translation=(0.0, 0.02, 0.11), ecg_ch='E31',
-                 eog_ch=('E63', 'E64'), verbose=True):
+                 head_shape_fname='hs_file', rotation_x=0.,
+                 translation=(0.0, 0.02, 0.11), convert=True,
+                 rename_channels=True, sort_by_ch_name=True,
+                 ecg_ch='E31', eog_ch=('E63', 'E64'), verbose=None):
     """ Raw object from 4D Neuroimaging MagnesWH3600 data
 
-    Note.
-    1) Currently direct inclusion of reference channel weights
-    is not supported. Please use 'mne_create_comp_data' to include
-    the weights or use the low level functions from this module to
-    include them by yourself.
-    2) The informed guess for the 4D name is E31 for the ECG channel and
-    E63, E63 for the EOG channels. Pleas check and adjust if those channels
-    are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
-    appear in the channel names of the raw object.
+    .. note::
+        1. Currently direct inclusion of reference channel weights
+           is not supported. Please use ``mne_create_comp_data`` to include
+           the weights, or use the low-level functions from this module to
+           include them yourself.
+        2. The informed guess for the 4D name is E31 for the ECG channel and
+           E63, E64 for the EOG channels. Please check and adjust if those
+           channels are present in your dataset but 'ECG 01' and 'EOG 01',
+           'EOG 02' don't appear in the channel names of the raw object.
 
     Parameters
     ----------
-    pdf_fname : str | None
-        absolute path to the processed data file (PDF)
-    config_fname : str | None
-        absolute path to system confnig file. If None, it is assumed to be in
-        the same directory.
-    head_shape_fname : str
-        absolute path to the head shape file. If None, it is assumed to be in
-        the same directory.
-    rotation_x : float | int | None
-        Degrees to tilt x-axis for sensor frame misalignment.
-        If None, no adjustment will be applied.
-    translation : array-like
+    pdf_fname : str
+        Path to the processed data file (PDF).
+    config_fname : str
+        Path to system config file.
+    head_shape_fname : str | None
+        Path to the head shape file.
+    rotation_x : float
+        Degrees to tilt x-axis for sensor frame misalignment. Ignored
+        if convert is False.
+    translation : array-like, shape (3,)
         The translation to place the origin of coordinate system
-        to the center of the head.
-    ecg_ch: str | None
-      The 4D name of the ECG channel. If None, the channel will be treated
-      as regular EEG channel.
-    eog_ch: tuple of str | None
-      The 4D names of the EOG channels. If None, the channels will be treated
-      as regular EEG channels.
+        to the center of the head. Ignored if convert is False.
+    convert : bool
+        Convert to Neuromag coordinates or not.
+    rename_channels : bool
+        Whether to rename the channels to Neuromag-style names; if False,
+        the original 4D channel labels are kept. Defaults to True.
+    sort_by_ch_name : bool
+        Reorder channels according to channel label. 4D channels don't have
+        monotonically increasing numbers in their labels. Defaults to True.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be treated
+        as regular EEG channels.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawBTi
+        A Raw object containing BTI data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
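+
+    Examples
+    --------
+    A minimal sketch using the conventional 4D file names; adjust the
+    paths to your own recording::
+
+        >>> raw = read_raw_bti('c,rfDC', 'config', 'hs_file')  # doctest: +SKIP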
     """
     return RawBTi(pdf_fname, config_fname=config_fname,
                   head_shape_fname=head_shape_fname,
                   rotation_x=rotation_x, translation=translation,
-                  verbose=verbose)
+                  convert=convert, rename_channels=rename_channels,
+                  sort_by_ch_name=sort_by_ch_name, ecg_ch=ecg_ch,
+                  eog_ch=eog_ch, verbose=verbose)
diff --git a/mne/io/bti/constants.py b/mne/io/bti/constants.py
index 727278c..459f252 100644
--- a/mne/io/bti/constants.py
+++ b/mne/io/bti/constants.py
@@ -2,9 +2,9 @@
 #
 # License: BSD (3-clause)
 
-from ..constants import Bunch
+from ..constants import BunchConst
 
-BTI = Bunch()
+BTI = BunchConst()
 
 BTI.ELEC_STATE_NOT_COLLECTED           = 0
 BTI.ELEC_STATE_COLLECTED               = 1
@@ -97,11 +97,3 @@ BTI.UB_B_E_TABLE                       = 'B_E_TABLE'
 BTI.UB_B_WEIGHTS_USED                  = 'B_weights_used'
 BTI.UB_B_TRIG_MASK                     = 'B_trig_mask'
 BTI.UB_B_WEIGHT_TABLE                  = 'BWT_'
-#
-## transforms
-#
-BTI.T_ROT_VV = ((0, -1, 0, 0), (1, 0, 0, 0), (0, 0, 1, 0), (1, 1, 1, 1))
-BTI.T_IDENT = ((1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (1, 1, 1, 1))
-BTI.T_ROT_IX = slice(0, 3), slice(0, 3)
-BTI.T_TRANS_IX = slice(0, 3), slice(3, 4)
-BTI.T_SCA_IX = slice(3, 4), slice(0, 4)
\ No newline at end of file
diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py
index aadb07c..ebc78ce 100644
--- a/mne/io/bti/read.py
+++ b/mne/io/bti/read.py
@@ -1,127 +1,120 @@
 # Authors: Denis A. Engemann  <denis.engemann at gmail.com>
 #          simplified BSD-3 license
 
-import struct
 import numpy as np
 from ...externals.six import b
 
 
-def _unpack_matrix(fid, format, rows, cols, dtype):
+def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
     """ Aux Function """
-    out = np.zeros((rows, cols), dtype=dtype)
-    bsize = struct.calcsize(format)
-    string = fid.read(bsize)
-    data = struct.unpack(format, string)
-    iter_mat = [(r, c) for r in range(rows) for c in range(cols)]
-    for idx, (row, col) in enumerate(iter_mat):
-        out[row, col] = data[idx]
+    dtype = np.dtype(dtype)
 
+    string = fid.read(int(dtype.itemsize * rows * cols))
+    out = np.fromstring(string, dtype=dtype).reshape(
+        rows, cols).astype(out_dtype)
     return out
 
 
-def _unpack_simple(fid, format, count):
+def _unpack_simple(fid, dtype, out_dtype):
     """ Aux Function """
-    bsize = struct.calcsize(format)
-    string = fid.read(bsize)
-    data = list(struct.unpack(format, string))
+    dtype = np.dtype(dtype)
+    string = fid.read(dtype.itemsize)
+    out = np.fromstring(string, dtype=dtype).astype(out_dtype)
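+    # np.fromstring returns a length-1 array here; unwrap it to a scalar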
 
-    out = data if count < 2 else list(data)
     if len(out) > 0:
         out = out[0]
-
     return out
 
 
 def read_str(fid, count=1):
     """ Read string """
-    format = '>' + ('c' * count)
-    data = list(struct.unpack(format, fid.read(struct.calcsize(format))))
-
-    bytestr = b('').join(data[0:data.index(b('\x00')) if b('\x00') in data else
-                         count])
+    dtype = np.dtype('>S%i' % count)
+    string = fid.read(dtype.itemsize)
+    data = np.fromstring(string, dtype=dtype)[0]
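+    # keep everything up to the first NUL terminator, if one is present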
+    bytestr = b('').join([data[0:data.index(b('\x00')) if
+                          b('\x00') in data else count]])
 
-    return str(bytestr.decode('ascii')) # Return native str type for Py2/3
+    return str(bytestr.decode('ascii'))  # Return native str type for Py2/3
 
 
 def read_char(fid, count=1):
     " Read character from bti file """
-    return _unpack_simple(fid, '>' + ('c' * count), count)
+    return _unpack_simple(fid, '>S%s' % count, 'S')
 
 
-def read_bool(fid, count=1):
+def read_bool(fid):
     """ Read bool value from bti file """
-    return _unpack_simple(fid, '>' + ('?' * count), count)
+    return _unpack_simple(fid, '>?', np.bool)
 
 
-def read_uint8(fid, count=1):
+def read_uint8(fid):
     """ Read unsigned 8bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('B' * count), count)
+    return _unpack_simple(fid, '>u1', np.uint8)
 
 
-def read_int8(fid, count=1):
+def read_int8(fid):
     """ Read 8bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('b' * count),  count)
+    return _unpack_simple(fid, '>i1', np.int8)
 
 
-def read_uint16(fid, count=1):
+def read_uint16(fid):
     """ Read unsigned 16bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('H' * count), count)
+    return _unpack_simple(fid, '>u2', np.uint16)
 
 
-def read_int16(fid, count=1):
+def read_int16(fid):
     """ Read 16bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('H' * count),  count)
+    return _unpack_simple(fid, '>i2', np.int16)
 
 
-def read_uint32(fid, count=1):
+def read_uint32(fid):
     """ Read unsigned 32bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('I' * count), count)
+    return _unpack_simple(fid, '>u4', np.uint32)
 
 
-def read_int32(fid, count=1):
+def read_int32(fid):
     """ Read 32bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('i' * count), count)
+    return _unpack_simple(fid, '>i4', np.int32)
 
 
-def read_uint64(fid, count=1):
+def read_uint64(fid):
     """ Read unsigned 64bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('Q' * count), count)
+    return _unpack_simple(fid, '>u8', np.uint64)
 
 
-def read_int64(fid, count=1):
+def read_int64(fid):
     """ Read 64bit integer from bti file """
-    return _unpack_simple(fid, '>' + ('q' * count), count)
+    return _unpack_simple(fid, '>i8', np.int64)
 
 
-def read_float(fid, count=1):
+def read_float(fid):
     """ Read 32bit float from bti file """
-    return _unpack_simple(fid, '>' + ('f' * count), count)
+    return _unpack_simple(fid, '>f4', np.float32)
 
 
-def read_double(fid, count=1):
+def read_double(fid):
     """ Read 64bit float from bti file """
-    return _unpack_simple(fid, '>' + ('d' * count), count)
+    return _unpack_simple(fid, '>f8', np.float64)
 
 
 def read_int16_matrix(fid, rows, cols):
     """ Read 16bit integer matrix from bti file """
-    format = '>' + ('h' * rows * cols)
-    return _unpack_matrix(fid, format, rows, cols, np.int16)
+    return _unpack_matrix(fid, rows, cols, dtype='>i2',
+                          out_dtype=np.int16)
 
 
 def read_float_matrix(fid, rows, cols):
     """ Read 32bit float matrix from bti file """
-    format = '>' + ('f' * rows * cols)
-    return _unpack_matrix(fid, format, rows, cols, 'f4')
+    return _unpack_matrix(fid, rows, cols, dtype='>f4',
+                          out_dtype=np.float32)
 
 
 def read_double_matrix(fid, rows, cols):
     """ Read 64bit float matrix from bti file """
-    format = '>' + ('d' * rows * cols)
-    return _unpack_matrix(fid, format, rows, cols, 'f8')
+    return _unpack_matrix(fid, rows, cols, dtype='>f8',
+                          out_dtype=np.float64)
 
 
 def read_transform(fid):
     """ Read 64bit float matrix transform from bti file """
-    format = '>' + ('d' * 4 * 4)
-    return _unpack_matrix(fid, format, 4, 4, 'f8')
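+    # BTI stores transforms as plain 4 x 4 matrices of big-endian doubles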
+    return read_double_matrix(fid, rows=4, cols=4)
diff --git a/mne/io/bti/tests/test_bti.py b/mne/io/bti/tests/test_bti.py
index 6750112..5419d6c 100644
--- a/mne/io/bti/tests/test_bti.py
+++ b/mne/io/bti/tests/test_bti.py
@@ -5,17 +5,25 @@ from __future__ import print_function
 
 import os
 import os.path as op
+from functools import reduce
 
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose)
 from nose.tools import assert_true, assert_raises, assert_equal
 
-from mne.io import Raw as Raw
-from mne.io.bti.bti import (_read_config, _setup_head_shape,
-                            _read_data, _read_bti_header)
-from mne.io import read_raw_bti
-from mne.utils import _TempDir
-from functools import reduce
+from mne.io import Raw, read_raw_bti
+from mne.io.bti.bti import (_read_config, _process_bti_headshape,
+                            _read_data, _read_bti_header, _get_bti_dev_t,
+                            _correct_trans, _get_bti_info)
+from mne.io.pick import pick_info
+from mne.io.constants import FIFF
+from mne import concatenate_raws, pick_types
+from mne.utils import run_tests_if_main
+from mne.transforms import Transform, combine_transforms, invert_transform
+from mne.externals import six
+from mne.fixes import partial
+
 
 base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
 
@@ -23,9 +31,9 @@ archs = 'linux', 'solaris'
 pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
 config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
 hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
-exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a) for a in archs]
+exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
+                   for a in archs]
 tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
-tempdir = _TempDir()
 
 # the 4D exporter doesn't export all channels, so we confine our comparison
 NCH = 248
@@ -36,8 +44,8 @@ def test_read_config():
     # for config in config_fname, config_solaris_fname:
     for config in config_fnames:
         cfg = _read_config(config)
-        assert_true(all([all([k not in block.lower() for k in ['', 'unknown']]
-                    for block in cfg['user_blocks'])]))
+        assert_true(all('unknown' not in block.lower() and block != ''
+                        for block in cfg['user_blocks']))
 
 
 def test_read_pdf():
@@ -49,9 +57,10 @@ def test_read_pdf():
         assert_true(data.shape == shape)
 
 
-def test_crop():
-    """ Test crop raw """
+def test_crop_append():
+    """ Test crop and append raw """
     raw = read_raw_bti(pdf_fnames[0], config_fnames[0], hs_fnames[0])
+    raw.load_data()  # currently does nothing
     y, t = raw[:]
     t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
     mask = (t0 <= t) * (t <= t1)
@@ -60,10 +69,36 @@ def test_crop():
     assert_true(y_.shape[1] == mask.sum())
     assert_true(y_.shape[0] == y.shape[0])
 
+    raw2 = raw.copy()
+    assert_raises(RuntimeError, raw.append, raw2, preload=False)
+    raw.append(raw2)
+    assert_allclose(np.tile(raw2[:, :][0], (1, 2)), raw[:, :][0])
+
+
+def test_transforms():
+    """ Test transformations """
+    bti_trans = (0.0, 0.02, 0.11)
+    bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
+    for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw = read_raw_bti(pdf, config, hs)
+        dev_ctf_t = raw.info['dev_ctf_t']
+        dev_head_t_old = raw.info['dev_head_t']
+        ctf_head_t = raw.info['ctf_head_t']
+
+        # 1) get BTI->Neuromag
+        bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
+
+        # 2) get Neuromag->BTI head
+        t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
+                               'meg', 'ctf_head')
+        # 3) get Neuromag->head
+        dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
+
+        assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
+
 
 def test_raw():
     """ Test bti conversion to Raw object """
-
     for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                          exported_fnames):
         # rx = 2 if 'linux' in pdf else 0
@@ -71,38 +106,153 @@ def test_raw():
         assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
         if op.exists(tmp_raw_fname):
             os.remove(tmp_raw_fname)
-        with Raw(exported, preload=True) as ex:
-            with read_raw_bti(pdf, config, hs) as ra:
-                assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
-                assert_array_almost_equal(ex.info['dev_head_t']['trans'],
-                                          ra.info['dev_head_t']['trans'], 7)
-                dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
-                              for r_ in (ra, ex)]
-                assert_array_equal(dig1, dig2)
-
-                coil1, coil2 = [np.concatenate([d['coil_trans'].flatten()
-                                for d in r_.info['chs'][:NCH]])
-                                for r_ in (ra, ex)]
-                assert_array_almost_equal(coil1, coil2, 7)
-
-                loc1, loc2 = [np.concatenate([d['loc'].flatten()
-                              for d in r_.info['chs'][:NCH]])
-                              for r_ in (ra, ex)]
-                assert_array_equal(loc1, loc2)
-
-                assert_array_equal(ra._data[:NCH], ex._data[:NCH])
-                assert_array_equal(ra.cals[:NCH], ex.cals[:NCH])
-                ra.save(tmp_raw_fname)
-            with Raw(tmp_raw_fname) as r:
-                print(r)
+        ex = Raw(exported, preload=True)
+        ra = read_raw_bti(pdf, config, hs)
+        assert_true('RawBTi' in repr(ra))
+        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
+        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
+                                  ra.info['dev_head_t']['trans'], 7)
+        dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
+                      for r_ in (ra, ex)]
+        assert_array_almost_equal(dig1, dig2, 18)
+        coil1, coil2 = [np.concatenate([d['loc'].flatten()
+                        for d in r_.info['chs'][:NCH]])
+                        for r_ in (ra, ex)]
+        assert_array_almost_equal(coil1, coil2, 7)
+
+        loc1, loc2 = [np.concatenate([d['loc'].flatten()
+                      for d in r_.info['chs'][:NCH]])
+                      for r_ in (ra, ex)]
+        assert_allclose(loc1, loc2)
+
+        assert_array_equal(ra._data[:NCH], ex._data[:NCH])
+        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
+
+        # check our transforms
+        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
+            if ex.info[key] is None:
+                pass
+            else:
+                assert_true(ra.info[key] is not None)
+                for ent in ('to', 'from', 'trans'):
+                    assert_allclose(ex.info[key][ent],
+                                    ra.info[key][ent])
+
+        # Make sure concatenation works
+        raw_concat = concatenate_raws([ra.copy(), ra])
+        assert_equal(raw_concat.n_times, 2 * ra.n_times)
+
+        ra.save(tmp_raw_fname)
+        re = Raw(tmp_raw_fname)
+        print(re)
+        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
+            assert_true(isinstance(re.info[key], dict))
+            this_t = re.info[key]['trans']
+            assert_equal(this_t.shape, (4, 4))
+            # check that the matrix is not identity
+            assert_true(not np.allclose(this_t, np.eye(4)))
         os.remove(tmp_raw_fname)
 
 
+def test_info_no_rename_no_reorder():
+    """ Test private renaming and reordering option """
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        info, bti_info = _get_bti_info(
+            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
+            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
+            ecg_ch='E31', eog_ch=('E63', 'E64'),
+            rename_channels=False, sort_by_ch_name=False)
+        assert_equal(info['ch_names'],
+                     [ch['ch_name'] for ch in info['chs']])
+        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
+                     ['A22', 'A2', 'A104', 'A241', 'A138'])
+        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
+                     ['A133', 'A158', 'A44', 'A134', 'A216'])
+
+
+def test_no_conversion():
+    """ Test bti no-conversion option """
+
+    get_info = partial(
+        _get_bti_info,
+        pdf_fname=None,  # test that a missing PDF file is skipped
+        rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
+        ecg_ch='E31', eog_ch=('E63', 'E64'),
+        rename_channels=False, sort_by_ch_name=False)
+
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw_info, _ = get_info(
+            config_fname=config, head_shape_fname=hs, convert=False)
+        raw_info_con = read_raw_bti(
+            pdf_fname=pdf,
+            config_fname=config, head_shape_fname=hs, convert=True).info
+
+        pick_info(raw_info_con,
+                  pick_types(raw_info_con, meg=True, ref_meg=True),
+                  copy=False)
+        pick_info(raw_info,
+                  pick_types(raw_info, meg=True, ref_meg=True), copy=False)
+        bti_info = _read_bti_header(pdf, config)
+        dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
+        assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
+        assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
+        assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
+        dig, t = _process_bti_headshape(hs, convert=False, use_hpi=False)
+        assert_array_equal(t['trans'], np.eye(4))
+
+        for ii, (old, new, con) in enumerate(zip(
+                dig, raw_info['dig'], raw_info_con['dig'])):
+            assert_equal(old['ident'], new['ident'])
+            assert_array_equal(old['r'], new['r'])
+            assert_true(not np.allclose(old['r'], con['r']))
+
+            if ii > 10:
+                break
+
+        ch_map = dict((ch['chan_label'],
+                       ch['loc']) for ch in bti_info['chs'])
+
+        for ii, ch_label in enumerate(raw_info['ch_names']):
+            if not ch_label.startswith('A'):
+                continue
+            t1 = ch_map[ch_label]  # correction already performed in bti_info
+            t2 = raw_info['chs'][ii]['loc']
+            t3 = raw_info_con['chs'][ii]['loc']
+            assert_allclose(t1, t2, atol=1e-15)
+            assert_true(not np.allclose(t1, t3))
+            idx_a = raw_info_con['ch_names'].index('MEG 001')
+            idx_b = raw_info['ch_names'].index('A22')
+            assert_equal(
+                raw_info_con['chs'][idx_a]['coord_frame'],
+                FIFF.FIFFV_COORD_DEVICE)
+            assert_equal(
+                raw_info['chs'][idx_b]['coord_frame'],
+                FIFF.FIFFV_MNE_COORD_4D_HEAD)
+
+
+def test_bytes_io():
+    """ Test bti bytes-io API """
+    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
+        raw = read_raw_bti(pdf, config, hs, convert=True)
+
+        with open(pdf, 'rb') as fid:
+            pdf = six.BytesIO(fid.read())
+        with open(config, 'rb') as fid:
+            config = six.BytesIO(fid.read())
+        with open(hs, 'rb') as fid:
+            hs = six.BytesIO(fid.read())
+        raw2 = read_raw_bti(pdf, config, hs, convert=True)
+        repr(raw2)
+        assert_array_equal(raw._data, raw2._data)
+
+
 def test_setup_headshape():
     """ Test reading bti headshape """
     for hs in hs_fnames:
-        dig, t = _setup_head_shape(hs)
+        dig, t = _process_bti_headshape(hs)
         expected = set(['kind', 'ident', 'r'])
         found = set(reduce(lambda x, y: list(x) + list(y),
                            [d.keys() for d in dig]))
         assert_true(not expected - found)
+
+run_tests_if_main()
diff --git a/mne/io/bti/transforms.py b/mne/io/bti/transforms.py
deleted file mode 100644
index a19b83b..0000000
--- a/mne/io/bti/transforms.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Authors: Denis A. Engemann  <denis.engemann at gmail.com>
-#
-#          simplified BSD-3 license
-
-
-import numpy as np
-from .constants import BTI
-
-
-def bti_identity_trans(dtype='>f8'):
-    """ Get BTi identity transform
-
-    Parameters
-    ----------
-    dtype : str | dtype object
-        The data format of the transform
-
-    Returns
-    -------
-    itrans : ndarray
-        The 4 x 4 transformation matrix.
-    """
-    return np.array(BTI.T_IDENT, dtype=dtype)
-
-
-def bti_to_vv_trans(adjust=None, translation=(0.0, 0.02, 0.11), dtype='>f8'):
-    """ Get the general Magnes3600WH to Neuromag coordinate transform
-
-    Parameters
-    ----------
-    adjust : int | None
-        Degrees to tilt x-axis for sensor frame misalignment.
-        If None, no adjustment will be applied.
-    translation : array-like
-        The translation to place the origin of coordinate system
-        to the center of the head.
-
-    Returns
-    -------
-    m_nm_t : ndarray
-        4 x 4 rotation, translation, scaling matrix.
-    """
-    flip_t = np.array(BTI.T_ROT_VV, dtype=dtype)
-    adjust_t = bti_identity_trans(dtype=dtype)
-    adjust = 0 if adjust is None else adjust
-    deg = np.deg2rad(np.float64(adjust))
-    adjust_t[[1, 2], [1, 2]] = np.cos(deg)
-    adjust_t[[1, 2], [2, 1]] = -np.sin(deg), np.sin(deg)
-    m_nm_t = np.ones([4, 4], dtype=dtype)
-    m_nm_t[BTI.T_ROT_IX] = np.dot(flip_t[BTI.T_ROT_IX],
-                                  adjust_t[BTI.T_ROT_IX])
-    m_nm_t[BTI.T_TRANS_IX] = np.matrix(translation, dtype=dtype).T
-
-    return m_nm_t
-
-
-def bti_to_vv_coil_trans(ch_t, bti_t, nm_t, nm_default_scale=True):
-    """ transforms 4D coil position to fiff / Neuromag
-    """
-    nm_coil_trans = apply_trans(inverse_trans(ch_t, bti_t), nm_t)
-
-    if nm_default_scale:
-        nm_coil_trans[3, :3] = 0.
-
-    return nm_coil_trans
-
-
-def inverse_trans(x, t, rot=BTI.T_ROT_IX, trans=BTI.T_TRANS_IX,
-                  scal=BTI.T_SCA_IX):
-    """ Undo a transform
-    """
-    x = x.copy()
-    x[scal] *= t[scal]
-    x[rot] = np.dot(t[rot].T, x[rot])
-    x[trans] -= t[trans]
-    x[trans] = np.dot(t[rot].T, x[trans])
-
-    return x
-
-
-def apply_trans(x, t, rot=BTI.T_ROT_IX, trans=BTI.T_TRANS_IX,
-                scal=BTI.T_SCA_IX):
-    """ Apply a transform
-    """
-    x = x.copy()
-    x[rot] = np.dot(t[rot], x[rot])
-    x[trans] = np.dot(t[rot], x[trans])
-    x[trans] += t[trans]
-    x[scal] *= t[scal]
-
-    return x
-
-
-def merge_trans(t1, t2, dtype='>f8'):
-    """ Merge two transforms
-    """
-    t = bti_identity_trans(dtype=dtype)
-    t[BTI.T_ROT_IX] = np.dot(t1[BTI.T_ROT_IX], t2[BTI.T_ROT_IX])
-    t[BTI.T_TRANS_IX] = np.dot(t1[BTI.T_ROT_IX], t2[BTI.T_TRANS_IX])
-    t[BTI.T_TRANS_IX] += t1[BTI.T_TRANS_IX]
-
-    return t
diff --git a/mne/io/constants.py b/mne/io/constants.py
index afa431b..9db2ae8 100644
--- a/mne/io/constants.py
+++ b/mne/io/constants.py
@@ -3,6 +3,7 @@
 #
 # License: BSD (3-clause)
 
+
 class Bunch(dict):
     """ Container object for datasets: dictionnary-like object that
         exposes its keys as attributes.
@@ -12,8 +13,15 @@ class Bunch(dict):
         dict.__init__(self, kwargs)
         self.__dict__ = self
 
-FIFF = Bunch()
 
+class BunchConst(Bunch):
+    """Class to prevent us from re-defining constants (DRY)"""
+    def __setattr__(self, attr, val):
+        if attr != '__dict__' and hasattr(self, attr):
+            raise AttributeError('Attribute "%s" already set' % attr)
+        super(BunchConst, self).__setattr__(attr, val)
+
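+# Sketch of the intended behavior: re-assigning an existing constant fails.
+#     >>> c = BunchConst()
+#     >>> c.FOO = 1
+#     >>> c.FOO = 2  # raises AttributeError('Attribute "FOO" already set')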
+FIFF = BunchConst()
 #
 # Blocks
 #
@@ -56,10 +64,14 @@ FIFF.FIFFB_MRI_SCENE          = 204     # Which are actually 3D scenes...
 FIFF.FIFFB_MRI_SEG            = 205     # MRI segmentation data
 FIFF.FIFFB_MRI_SEG_REGION     = 206     # One MRI segmentation region
 FIFF.FIFFB_PROCESSING_HISTORY = 900
+FIFF.FIFFB_PROCESSING_RECORD  = 901
+
+FIFF.FIFFB_CHANNEL_DECOUPLER  = 501
 FIFF.FIFFB_SSS_INFO           = 502
-FIFF.FIFFB_SSS_CAL_ADJUST     = 503
+FIFF.FIFFB_SSS_CAL            = 503
 FIFF.FIFFB_SSS_ST_INFO        = 504
 FIFF.FIFFB_SSS_BASES          = 505
+FIFF.FIFFB_SMARTSHIELD        = 510
 #
 # Of general interest
 #
@@ -124,9 +136,11 @@ FIFF.FIFF_NAME           = 233          # Intended to be a short name.
 FIFF.FIFF_DESCRIPTION    = FIFF.FIFF_COMMENT # (Textual) Description of an object
 FIFF.FIFF_DIG_STRING     = 234          # String of digitized points
 FIFF.FIFF_LINE_FREQ      = 235    # Line frequency
+FIFF.FIFF_CUSTOM_REF     = 236    # Whether a custom reference was applied to the data (NB: overlaps with HPI const #)
 #
 # HPI fitting program tags
 #
+FIFF.FIFF_HPI_COIL_FREQ          = 236   # HPI coil excitation frequency
 FIFF.FIFF_HPI_COIL_MOMENTS       = 240   # Estimated moment vectors for the HPI coil magnetic dipoles
 FIFF.FIFF_HPI_FIT_GOODNESS       = 241   # Three floats indicating the goodness of fit
 FIFF.FIFF_HPI_FIT_ACCEPT         = 242   # Bitmask indicating acceptance (see below)
@@ -153,6 +167,7 @@ FIFF.FIFFV_EMG_CH     = 302
 FIFF.FIFFV_ECG_CH     = 402
 FIFF.FIFFV_MISC_CH    = 502
 FIFF.FIFFV_RESP_CH    = 602  # Respiration monitoring
+FIFF.FIFFV_SEEG_CH    = 702  # stereotactic EEG
 FIFF.FIFFV_SYST_CH    = 900  # some system status information (on Triux systems only)
 FIFF.FIFFV_IAS_CH     = 910  # Internal Active Shielding data (maybe on Triux only)
 FIFF.FIFFV_EXCI_CH    = 920  # flux excitation channel used to be a stimulus channel
@@ -186,9 +201,6 @@ FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
 #
 # Needed for raw and evoked-response data
 #
-FIFF.FIFF_FIRST_SAMPLE   = 208
-FIFF.FIFF_LAST_SAMPLE    = 209
-FIFF.FIFF_ASPECT_KIND    = 210
 FIFF.FIFF_DATA_BUFFER    = 300    # Buffer containing measurement data
 FIFF.FIFF_DATA_SKIP      = 301    # Data skip in buffers
 FIFF.FIFF_EPOCH          = 302    # Buffer containing one epoch and channel
@@ -216,8 +228,11 @@ FIFF.FIFF_PROJ_AIM          = 502
 FIFF.FIFF_PROJ_PERSONS      = 503
 FIFF.FIFF_PROJ_COMMENT      = 504
 
-FIFF.FIFF_EVENT_CHANNELS    = 600  # Event channel numbers */
+FIFF.FIFF_EVENT_CHANNELS    = 600  # Event channel numbers
FIFF.FIFF_EVENT_LIST        = 601  # List of events (integers: <sample before after>)
+FIFF.FIFF_EVENT_CHANNEL     = 602  # Event channel
+FIFF.FIFF_EVENT_BITS        = 603  # Event bits array
+
 #
 # Tags used in saving SQUID characteristics etc.
 #
@@ -261,8 +276,6 @@ FIFF.FIFFV_BEM_SURF_ID_BRAIN      = 1
 FIFF.FIFFV_BEM_SURF_ID_SKULL      = 3
 FIFF.FIFFV_BEM_SURF_ID_HEAD       = 4
 
-FIFF.FIFFB_BEM                  = 310  # BEM data
-FIFF.FIFFB_BEM_SURF             = 311  # One of the surfaces
 FIFF.FIFF_BEM_SURF_ID           = 3101  # int    surface number
 FIFF.FIFF_BEM_SURF_NAME         = 3102  # string surface name
 FIFF.FIFF_BEM_SURF_NNODE        = 3103  # int    number of nodes on a surface
@@ -433,6 +446,9 @@ FIFF.FIFF_MNE_COV_DIAG               = 3533  # Diagonal matrix
 FIFF.FIFF_MNE_COV_EIGENVALUES        = 3534  # Eigenvalues and eigenvectors of the above
 FIFF.FIFF_MNE_COV_EIGENVECTORS       = 3535
 FIFF.FIFF_MNE_COV_NFREE              = 3536  # Number of degrees of freedom
+FIFF.FIFF_MNE_COV_METHOD             = 3537  # The estimator used
+FIFF.FIFF_MNE_COV_SCORE              = 3538  # Negative log-likelihood
+
 #
 # 3540... Inverse operator
 #
@@ -495,6 +511,22 @@ FIFF.FIFF_MNE_ICA_MATRIX            = 3607     # ICA unmixing matrix
 FIFF.FIFF_MNE_ICA_BADS              = 3608     # ICA bad sources
 FIFF.FIFF_MNE_ICA_MISC_PARAMS       = 3609     # ICA misc params
 #
+# Maxfilter tags
+#
+FIFF.FIFF_SSS_FRAME                 = 263
+FIFF.FIFF_SSS_JOB                   = 264
+FIFF.FIFF_SSS_ORIGIN                = 265
+FIFF.FIFF_SSS_ORD_IN                = 266
+FIFF.FIFF_SSS_ORD_OUT               = 267
+FIFF.FIFF_SSS_NMAG                  = 268
+FIFF.FIFF_SSS_COMPONENTS            = 269
+FIFF.FIFF_SSS_CAL_CHANS             = 270
+FIFF.FIFF_SSS_CAL_CORRS             = 271
+FIFF.FIFF_SSS_ST_CORR               = 272
+FIFF.FIFF_SSS_NFREE                 = 278
+FIFF.FIFF_SSS_ST_LENGTH             = 279
+FIFF.FIFF_DECOUPLER_MATRIX          = 800
+#
 # Fiff values associated with MNE computations
 #
 FIFF.FIFFV_MNE_UNKNOWN_ORI          = 0
@@ -601,9 +633,6 @@ FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
 FIFF.FIFFV_COIL_BABY_GRAD               = 7001
 FIFF.FIFFV_COIL_BABY_MAG                = 7002
 FIFF.FIFFV_COIL_BABY_REF_MAG            = 7003
-FIFF.FIFFV_REF_MEG_CH                   = 301
-FIFF.FIFF_UNIT_AM_M2  = 203 # Am/m^2
-FIFF.FIFF_UNIT_AM_M3  = 204	 # Am/m^3
 #
 #   FWD Types
 #
@@ -725,18 +754,7 @@ FIFF.FIFF_UNITM_N    = -9
 FIFF.FIFF_UNITM_P    = -12
 FIFF.FIFF_UNITM_F    = -15
 FIFF.FIFF_UNITM_A    = -18
-#
-# Digitization point details
-#
-FIFF.FIFFV_POINT_CARDINAL = 1
-FIFF.FIFFV_POINT_HPI      = 2
-FIFF.FIFFV_POINT_EEG      = 3
-FIFF.FIFFV_POINT_ECG      = FIFF.FIFFV_POINT_EEG
-FIFF.FIFFV_POINT_EXTRA    = 4
 
-FIFF.FIFFV_POINT_LPA      = 1
-FIFF.FIFFV_POINT_NASION   = 2
-FIFF.FIFFV_POINT_RPA      = 3
 #
 # Coil types
 #
@@ -769,8 +787,6 @@ FIFF.FIFFV_COIL_MAGNES_GRAD        = 4002  # Magnes WH gradiometer
 FIFF.FIFFV_COIL_MAGNES_R_MAG       = 4003  # Magnes WH reference magnetometer
FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA  = 4004  # Magnes WH reference diagonal gradiometer
 FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF  = 4005  # Magnes WH reference off-diagonal gradiometer
-FIFF.FIFFV_COIL_CTF_GRAD           = 5001  # CTF axial gradiometer
-FIFF.FIFFV_COIL_KIT_GRAD           = 6001  # KIT system axial gradiometer
 
 # MNE RealTime
 FIFF.FIFF_MNE_RT_COMMAND           = 3700  # realtime command
diff --git a/mne/io/ctf.py b/mne/io/ctf.py
index 3f789f6..3bdb8e8 100644
--- a/mne/io/ctf.py
+++ b/mne/io/ctf.py
@@ -11,6 +11,8 @@ import numpy as np
 from .constants import FIFF
 from .tag import find_tag, has_tag, read_tag
 from .tree import dir_tree_find
+from .write import start_block, end_block, write_int
+from .matrix import write_named_matrix
 
 from ..utils import logger, verbose
 
@@ -52,7 +54,7 @@ def _read_named_matrix(fid, node, matkind):
     else:
         if not has_tag(node, matkind):
             raise ValueError('Desired named matrix (kind = %d) not available'
-                                                                    % matkind)
+                             % matkind)
 
     #   Read everything we need
     tag = find_tag(fid, node, matkind)
@@ -179,10 +181,10 @@ def read_ctf_comp(fid, node, chs, verbose=None):
                 p = ch_names.count(mat['col_names'][col])
                 if p == 0:
                     raise Exception('Channel %s is not available in data'
-                                                % mat['col_names'][col])
+                                    % mat['col_names'][col])
                 elif p > 1:
                     raise Exception('Ambiguous channel %s' %
-                                                        mat['col_names'][col])
+                                    mat['col_names'][col])
                 idx = ch_names.index(mat['col_names'][col])
                 col_cals[col] = 1.0 / (chs[idx]['range'] * chs[idx]['cal'])
 
@@ -192,10 +194,10 @@ def read_ctf_comp(fid, node, chs, verbose=None):
                 p = ch_names.count(mat['row_names'][row])
                 if p == 0:
                     raise Exception('Channel %s is not available in data'
-                                               % mat['row_names'][row])
+                                    % mat['row_names'][row])
                 elif p > 1:
                     raise Exception('Ambiguous channel %s' %
-                                                mat['row_names'][row])
+                                    mat['row_names'][row])
                 idx = ch_names.index(mat['row_names'][row])
                 row_cals[row] = chs[idx]['range'] * chs[idx]['cal']
 
@@ -219,10 +221,6 @@ def read_ctf_comp(fid, node, chs, verbose=None):
 ###############################################################################
 # Writing
 
-from .write import start_block, end_block, write_int
-from .matrix import write_named_matrix
-
-
 def write_ctf_comp(fid, comps):
     """Write the CTF compensation data into a fif file
 
@@ -249,8 +247,8 @@ def write_ctf_comp(fid, comps):
         if not comp['save_calibrated']:
             # Undo calibration
             comp = deepcopy(comp)
-            data = ((1. / comp['rowcals'][:, None]) * comp['data']['data']
-                    * (1. / comp['colcals'][None, :]))
+            data = ((1. / comp['rowcals'][:, None]) * comp['data']['data'] *
+                    (1. / comp['colcals'][None, :]))
             comp['data']['data'] = data
         write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data'])
         end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
diff --git a/mne/io/diff.py b/mne/io/diff.py
index ac39412..9e1fd1c 100644
--- a/mne/io/diff.py
+++ b/mne/io/diff.py
@@ -18,7 +18,7 @@ def is_equal(first, second, verbose=None):
         all_equal = False
     if isinstance(first, dict):
         for key in first.keys():
-            if (not key in second):
+            if (key not in second):
                 logger.info("Missing key %s in %s" % (key, second))
                 all_equal = False
             else:
diff --git a/mne/io/edf/__init__.py b/mne/io/edf/__init__.py
index c332e4f..f712d3d 100644
--- a/mne/io/edf/__init__.py
+++ b/mne/io/edf/__init__.py
@@ -1,6 +1,6 @@
 """EDF+,BDF module for conversion to FIF"""
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py
index ce755ed..01509c4 100644
--- a/mne/io/edf/edf.py
+++ b/mne/io/edf/edf.py
@@ -1,8 +1,8 @@
-"""Conversion tool from EDF+,BDF to FIF
+"""Conversion tool from EDF, EDF+, BDF to FIF
 
 """
 
-# Authors: Teon Brooks <teon at nyu.edu>
+# Authors: Teon Brooks <teon.brooks at gmail.com>
 #          Martin Billinger <martin.billinger at tugraz.at>
 #
 # License: BSD (3-clause)
@@ -15,309 +15,294 @@ import warnings
 from math import ceil, floor
 
 import numpy as np
-from scipy.interpolate import interp1d
 
-from ...transforms import als_ras_trans_mm, apply_trans
 from ...utils import verbose, logger
-from ..base import _BaseRaw
-from ..meas_info import Info
+from ..base import _BaseRaw, _check_update_montage
+from ..meas_info import _empty_info
+from ..pick import pick_types
 from ..constants import FIFF
-from ...coreg import get_ras_to_neuromag_trans
 from ...filter import resample
 from ...externals.six.moves import zip
 
 
 class RawEDF(_BaseRaw):
-    """Raw object from EDF+,BDF file
+    """Raw object from EDF, EDF+, BDF file
 
     Parameters
     ----------
     input_fname : str
         Path to the EDF+,BDF file.
-
-    n_eeg : int | None
-        Number of EEG electrodes.
-        If None, all channels are considered EEG.
-
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the electrodes in the
+        edf file. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes in the
+        edf file. Default is None.
     stim_channel : str | int | None
         The channel name or channel index (starting at 0).
         -1 corresponds to the last channel (default).
         If None, there will be no stim channel added.
-
     annot : str | None
         Path to annotation file.
         If None, no derived stim channel will be added (for files requiring
         annotation file to interpret stim channel).
-
     annotmap : str | None
         Path to annotation map file containing mapping from label to trigger.
         Must be specified if annot is not None.
-
-    tal_channel : int | None
-        The channel index (starting at 0).
-        Index of the channel containing EDF+ annotations.
-        -1 corresponds to the last channel.
-        If None, the annotation channel is not used.
-        Note: this is overruled by the annotation file if specified.
-
-    hpts : str | None
-        Path to the hpts file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
-
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
-    There is an assumption that the data are arranged such that EEG channels
-    appear first then miscellaneous channels (EOGs, AUX, STIM).
-    The stimulus channel is saved as 'STI 014'
-
     See Also
     --------
    mne.io.Raw : Documentation of attributes and methods.
     """
     @verbose
-    def __init__(self, input_fname, n_eeg=None, stim_channel=-1, annot=None,
-                 annotmap=None, tal_channel=None, hpts=None, preload=False,
-                 verbose=None):
+    def __init__(self, input_fname, montage, eog=None, misc=None,
+                 stim_channel=-1, annot=None, annotmap=None,
+                 preload=False, verbose=None):
         logger.info('Extracting edf Parameters from %s...' % input_fname)
         input_fname = os.path.abspath(input_fname)
-        self.info, self._edf_info = _get_edf_info(input_fname, n_eeg,
-                                                  stim_channel, annot,
-                                                  annotmap, tal_channel,
-                                                  hpts, preload)
+        info, edf_info = _get_edf_info(input_fname, stim_channel,
+                                       annot, annotmap,
+                                       eog, misc, preload)
         logger.info('Creating Raw.info structure...')
+        _check_update_montage(info, montage)
 
         if bool(annot) != bool(annotmap):
             warnings.warn(("Stimulus Channel will not be annotated. "
                            "Both 'annot' and 'annotmap' must be specified."))
 
         # Raw attributes
-        self.verbose = verbose
-        self.preload = False
-        self._filenames = list()
-        self._projector = None
-        self.first_samp = 0
-        self.last_samp = self._edf_info['nsamples'] - 1
-        self.comp = None  # no compensation for EDF
-        self.proj = False
-        self._first_samps = np.array([self.first_samp])
-        self._last_samps = np.array([self.last_samp])
-        self._raw_lengths = np.array([self._edf_info['nsamples']])
-        self.rawdirs = np.array([])
-        self.cals = np.array([])
-        self.orig_format = 'int'
-
-        if preload:
-            self.preload = preload
-            logger.info('Reading raw data from %s...' % input_fname)
-            self._data, _ = self._read_segment()
-            assert len(self._data) == self.info['nchan']
-
-            # Add time info
-            self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
-            self._times = np.arange(self.first_samp, self.last_samp + 1,
-                                    dtype=np.float64)
-            self._times /= self.info['sfreq']
-            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
-                        % (self.first_samp, self.last_samp,
-                           float(self.first_samp) / self.info['sfreq'],
-                           float(self.last_samp) / self.info['sfreq']))
+        last_samps = [edf_info['nsamples'] - 1]
+        super(RawEDF, self).__init__(
+            info, preload, filenames=[input_fname], raw_extras=[edf_info],
+            last_samps=last_samps, orig_format='int',
+            verbose=verbose)
+
         logger.info('Ready.')
 
-    def __repr__(self):
+    @verbose
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
+        from scipy.interpolate import interp1d
+        if mult is not None:
+            # XXX "cals" here does not function the same way as in RawFIF,
+            # and for efficiency we want to be able to combine mult and cals
+            # so proj support will have to wait until this is resolved
+            raise NotImplementedError('mult is not supported yet')
+        # RawFIF and RawEDF think of "stop" differently, easiest to increment
+        # here and refactor later
+        stop += 1
+        sel = np.arange(self.info['nchan'])[idx]
+
+        n_samps = self._raw_extras[fi]['n_samps']
+        buf_len = self._raw_extras[fi]['max_samp']
+        sfreq = self.info['sfreq']
         n_chan = self.info['nchan']
-        data_range = self.last_samp - self.first_samp + 1
-        s = ('%r' % os.path.basename(self.info['file_id']),
-             "n_channels x n_times : %s x %s" % (n_chan, data_range))
-        return "<RawEDF  |  %s>" % ', '.join(s)
+        data_size = self._raw_extras[fi]['data_size']
+        data_offset = self._raw_extras[fi]['data_offset']
+        stim_channel = self._raw_extras[fi]['stim_channel']
+        tal_channel = self._raw_extras[fi]['tal_channel']
+        annot = self._raw_extras[fi]['annot']
+        annotmap = self._raw_extras[fi]['annotmap']
+        subtype = self._raw_extras[fi]['subtype']
+
+        # this is used to deal with indexing in the middle of a sampling period
+        blockstart = int(floor(float(start) / buf_len) * buf_len)
+        blockstop = int(ceil(float(stop) / buf_len) * buf_len)
+
+        # gain constructor
+        physical_range = np.array([ch['range'] for ch in self.info['chs']])
+        cal = np.array([ch['cal'] for ch in self.info['chs']])
+        gains = np.atleast_2d(self._raw_extras[fi]['units'] *
+                              (physical_range / cal))
+
+        # physical dimension in uV
+        physical_min = np.atleast_2d(self._raw_extras[fi]['units'] *
+                                     self._raw_extras[fi]['physical_min'])
+        digital_min = self._raw_extras[fi]['digital_min']
+
+        offsets = np.atleast_2d(physical_min - (digital_min * gains)).T
+        if tal_channel is not None:
+            offsets[tal_channel] = 0
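+        # standard EDF scaling: physical = digital * gain + offset, with
+        # offset = physical_min - digital_min * gain; the TAL (annotation)
+        # channel carries text, so its offset is zeroed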
+
+        read_size = blockstop - blockstart
+        this_data = np.empty((len(sel), buf_len))
+        data = data[:, offset:offset + (stop - start)]
+        """
+        Consider this example:
 
-    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
-                      projector=None):
-        """Read a chunk of raw data
+        tmin, tmax = (2, 27)
+        read_size = 30
+        buf_len = 10
+        sfreq = 1.
 
-        Parameters
-        ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
+                        +---------+---------+---------+
+        File structure: |  buf0   |   buf1  |   buf2  |
+                        +---------+---------+---------+
+        File time:      0        10        20        30
+                        +---------+---------+---------+
+        Requested time:   2                       27
 
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
+                        |                             |
+                    blockstart                    blockstop
+                          |                        |
+                        start                    stop
 
-        sel : array, optional
-            Indices of channels to select.
+        We need 27 - 2 = 25 samples (per channel) to store our data, and
+        we need to read from 3 buffers (30 samples) to get all of our data.
 
-        projector : array
-            SSP operator to apply to the data.
+        On all reads but the first, the data we read starts at
+        the first sample of the buffer. On all reads but the last,
+        the data we read ends on the last sample of the buffer.
 
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
+        We call this_data the variable that stores the current buffer's data,
+        and data the variable that stores the total output.
 
-        Returns
-        -------
-        data : array, [channels x samples]
-           the data matrix (channels x samples).
+        On the first read, we need to do this::
 
-        times : array, [samples]
-            returns the time values corresponding to the samples.
-        """
-        if sel is None:
-            sel = list(range(self.info['nchan']))
-        elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
-            return (666, 666)
-        if projector is not None:
-            raise NotImplementedError('Currently does not handle projections.')
-        if stop is None:
-            stop = self.last_samp + 1
-        elif stop > self.last_samp + 1:
-            stop = self.last_samp + 1
-
-        #  Initial checks
-        start = int(start)
-        stop = int(stop)
+            >>> data[0:buf_len-2] = this_data[2:buf_len]
 
-        sfreq = self.info['sfreq']
-        n_chan = self.info['nchan']
-        data_size = self._edf_info['data_size']
-        data_offset = self._edf_info['data_offset']
-        stim_channel = self._edf_info['stim_channel']
-        tal_channel = self._edf_info['tal_channel']
-        annot = self._edf_info['annot']
-        annotmap = self._edf_info['annotmap']
-
-        blockstart = int(floor(float(start) / sfreq) * sfreq)
-        blockstop = int(ceil(float(stop) / sfreq) * sfreq)
-
-        if start >= stop:
-            raise ValueError('No data in this range')
-
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(sfreq),
-                     (stop - 1) / float(sfreq)))
-
-        gains = []
-        for chan in range(n_chan):
-            # gain constructor
-            physical_range = self.info['chs'][chan]['range']
-            cal = float(self.info['chs'][chan]['cal'])
-            unit_mul = 10 ** self.info['chs'][chan]['unit_mul']
-            gains.append(unit_mul * (physical_range / cal))
-
-        with open(self.info['file_id'], 'rb') as fid:
+        On the second read, we need to do::
+
+            >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len]
+
+        On the final read, we need to do::
+
+            >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3]
+
+        """
+        with open(self._filenames[fi], 'rb', buffering=0) as fid:
             # extract data
-            fid.seek(data_offset)
-            buffer_size = blockstop - blockstart
-            pointer = blockstart * n_chan * data_size
-            fid.seek(data_offset + pointer)
-
-            if 'n_samps' in self._edf_info:
-                n_samps = self._edf_info['n_samps']
-                max_samp = float(np.max(n_samps))
-                blocks = int(buffer_size / max_samp)
-            else:
-                blocks = int(ceil(float(buffer_size) / sfreq))
-            datas = []
-            # bdf data: 24bit data
-            if self._edf_info['subtype'] == '24BIT':
-                data = fid.read(buffer_size * n_chan * data_size)
-                data = np.fromstring(data, np.uint8)
-                data = data.reshape(-1, 3).astype(np.int32)
-                # this converts to 24-bit little endian integer
-                # # no support in numpy
-                data = (data[:, 0] + (data[:, 1] << 8) + (data[:, 2] << 16))
-                # 24th bit determines the sign
-                data[data >= (1 << 23)] -= (1 << 24)
-                data = data.reshape((int(sfreq), n_chan, blocks), order='F')
-                for i in range(blocks):
-                    datas.append(data[:, :, i].T)
-            else:
-                if 'n_samps' in self._edf_info:
-                    data = []
-                    for _ in range(blocks):
-                        for samp in n_samps:
-                            chan_data = np.fromfile(fid, dtype='<i2',
-                                                    count=samp)
-                            data.append(chan_data)
-                    for i, samp in enumerate(n_samps):
-                        chan_data = data[i::n_chan]
-                        chan_data = np.hstack(chan_data)
-                        if i == tal_channel:
+            fid.seek(data_offset + blockstart * n_chan * data_size)
+            n_blk = int(ceil(float(read_size) / buf_len))
+            start_offset = start - blockstart
+            end_offset = blockstop - stop
+            for bi in range(n_blk):
+                # Triage start (sidx) and end (eidx) indices for
+                # data (d) and read (r)
+                if bi == 0:
+                    d_sidx = 0
+                    r_sidx = start_offset
+                else:
+                    d_sidx = bi * buf_len - start_offset
+                    r_sidx = 0
+                if bi == n_blk - 1:
+                    d_eidx = data.shape[1]
+                    r_eidx = buf_len - end_offset
+                else:
+                    d_eidx = (bi + 1) * buf_len - start_offset
+                    r_eidx = buf_len
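+                # Worked example, matching the docstring above: with
+                # buf_len=10, start_offset=2, end_offset=3 this triage gives
+                #   bi=0: data[:, 0:8]   <- read[2:10]
+                #   bi=1: data[:, 8:18]  <- read[0:10]
+                #   bi=2: data[:, 18:25] <- read[0:7]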
+                n_buf_samp = r_eidx - r_sidx
+                count = 0
+                for j, samp in enumerate(n_samps):
+                    # skip channels that were not requested
+                    if j not in sel:
+                        fid.seek(samp * data_size, 1)
+                        continue
+                    if samp == buf_len:
+                        # use faster version with skips built in
+                        if r_sidx > 0:
+                            fid.seek(r_sidx * data_size, 1)
+                        ch_data = _read_ch(fid, subtype, n_buf_samp, data_size)
+                        if r_eidx < buf_len:
+                            fid.seek((buf_len - r_eidx) * data_size, 1)
+                    else:
+                        # read in all the data and triage appropriately
+                        ch_data = _read_ch(fid, subtype, samp, data_size)
+                        if j == tal_channel:
                             # don't resample tal_channel,
                             # pad with zeros instead.
-                            n_missing = int(max_samp - samp) * blocks
-                            chan_data = np.hstack([chan_data, [0] * n_missing])
-                        elif i == stim_channel and samp < max_samp:
-                            if annot and annotmap or tal_channel is not None:
-                                # don't bother with resampling the stim channel
+                            n_missing = int(buf_len - samp)
+                            ch_data = np.hstack([ch_data, [0] * n_missing])
+                            ch_data = ch_data[r_sidx:r_eidx]
+                        elif j == stim_channel:
+                            if (annot and annotmap or
+                                    tal_channel is not None):
+                                # don't bother with resampling the stim ch
                                 # because it gets overwritten later on.
-                                chan_data = np.zeros(max_samp)
+                                ch_data = np.zeros(n_buf_samp)
                             else:
-                                warnings.warn('Interpolating stim channel. '
-                                              'Events may jitter.')
+                                warnings.warn('Interpolating stim channel.'
+                                              ' Events may jitter.')
                                 oldrange = np.linspace(0, 1, samp + 1, True)
-                                newrange = np.linspace(0, 1, max_samp, False)
-                                chan_data = interp1d(oldrange,
-                                                     np.append(chan_data, 0),
-                                                     kind='zero')(newrange)
-                        elif samp != max_samp:
-                            mult = max_samp / samp
-                            chan_data = resample(x=chan_data, up=mult,
-                                                 down=1, npad=0)
-                        datas.append(chan_data)
-                else:
-                    data = np.fromfile(fid, dtype='<i2',
-                                       count=buffer_size * n_chan)
-                    data = data.reshape((int(sfreq), n_chan, blocks),
-                                        order='F')
-                    for i in range(blocks):
-                        datas.append(data[:, :, i].T)
-        if 'n_samps' in self._edf_info:
-            data = np.vstack(datas)
-        else:
-            data = np.hstack(datas)
-        gains = np.array([gains])
-        data = gains.T * data
-        if stim_channel is not None:
+                                newrange = np.linspace(0, 1, buf_len, False)
+                                newrange = newrange[r_sidx:r_eidx]
+                                ch_data = interp1d(
+                                    oldrange, np.append(ch_data, 0),
+                                    kind='zero')(newrange)
+                        else:
+                            ch_data = resample(ch_data, buf_len, samp,
+                                               npad=0)[r_sidx:r_eidx]
+                    this_data[count, :n_buf_samp] = ch_data
+                    count += 1
+                data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
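+        # scale digital values to physical units using the per-channel
+        # gains and offsets (computed by the caller, outside this hunk,
+        # from the header's physical/digital ranges)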
+        data *= gains.T[sel]
+        data += offsets[sel]
+
+        # only try to read the stim channel if it's not None and it's
+        # actually one of the requested channels
+        if stim_channel is not None and (sel == stim_channel).sum() > 0:
+            stim_channel_idx = np.where(sel == stim_channel)[0]
             if annot and annotmap:
-                data[stim_channel] = 0
-                evts = _read_annot(annot, annotmap, sfreq, self.last_samp)
-                data[stim_channel, :evts.size] = evts[start:stop]
+                evts = _read_annot(annot, annotmap, sfreq,
+                                   self._last_samps[fi])
+                data[stim_channel_idx, :] = evts[start:stop]
             elif tal_channel is not None:
-                evts = _parse_tal_channel(data[tal_channel])
-                self._edf_info['events'] = evts
+                tal_channel_idx = np.where(sel == tal_channel)[0][0]
+                evts = _parse_tal_channel(data[tal_channel_idx])
+                self._raw_extras[fi]['events'] = evts
 
                 unique_annots = sorted(set([e[2] for e in evts]))
                 mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
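+                # e.g. annotations {'move', 'blink'} become event ids
+                # {'blink': 1, 'move': 2} (1-based, sorted alphabetically)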
 
-                data[stim_channel] = 0
+                stim = np.zeros(read_size)
                 for t_start, t_duration, annotation in evts:
                     evid = mapping[annotation]
                     n_start = int(t_start * sfreq)
                     n_stop = int(t_duration * sfreq) + n_start - 1
                     # make sure events without duration get one sample
-                    n_stop = n_stop if n_stop > n_start else n_start+1
-                    if any(data[stim_channel][n_start:n_stop]):
+                    n_stop = n_stop if n_stop > n_start else n_start + 1
+                    if any(stim[n_start:n_stop]):
                         raise NotImplementedError('EDF+ with overlapping '
                                                   'events not supported.')
-                    data[stim_channel][n_start:n_stop] = evid
+                    stim[n_start:n_stop] = evid
+                data[stim_channel_idx, :] = stim[start:stop]
             else:
-                stim = np.array(data[stim_channel], int)
-                mask = 255 * np.ones(stim.shape, int)
-                stim = np.bitwise_and(stim, mask)
-                data[stim_channel] = stim
-        datastart = start - blockstart
-        datastop = stop - blockstart
-        data = data[sel, datastart:datastop]
-
-        logger.info('[done]')
-        times = np.arange(start, stop, dtype=float) / self.info['sfreq']
-
-        return data, times
+                # Mask to 16 bits to support trigger values up to 2 ** 16 - 1
+                stim = np.bitwise_and(data[stim_channel_idx].astype(int),
+                                      65535)
+                data[stim_channel_idx, :] = stim
+
+
+def _read_ch(fid, subtype, samp, data_size):
+    """Helper to read a number of samples for a single channel"""
+    if subtype in ('24BIT', 'bdf'):
+        ch_data = np.fromfile(fid, dtype=np.uint8,
+                              count=samp * data_size)
+        ch_data = ch_data.reshape(-1, 3).astype(np.int32)
+        ch_data = ((ch_data[:, 0]) +
+                   (ch_data[:, 1] << 8) +
+                   (ch_data[:, 2] << 16))
+        # 24th bit determines the sign
+        ch_data[ch_data >= (1 << 23)] -= (1 << 24)
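+        # e.g. bytes (0xff, 0xff, 0xff) decode to 16777215, which the
+        # sign fix above maps back to -1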
+    # edf data: 16bit data
+    else:
+        ch_data = np.fromfile(fid, dtype='<i2', count=samp)
+    return ch_data
 
 
 def _parse_tal_channel(tal_channel_data):
@@ -358,74 +343,15 @@ def _parse_tal_channel(tal_channel_data):
     return events
 
 
-def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
-                  hpts, preload):
-    """Extracts all the information from the EDF+,BDF file.
-
-    Parameters
-    ----------
-    fname : str
-        Raw EDF+,BDF file to be read.
-
-    n_eeg : int | None
-        Number of EEG electrodes.
-        If None, all channels are considered EEG.
-
-    stim_channel : str | int | None
-        The channel name or channel index (starting at 0).
-        -1 corresponds to the last channel.
-        If None, there will be no stim channel added.
-
-    annot : str | None
-        Path to annotation file.
-        If None, no derived stim channel will be added (for files requiring
-        annotation file to interpret stim channel).
-
-    annotmap : str | None
-        Path to annotation map file containing mapping from label to trigger.
-        Must be specified if annot is not None.
-
-    tal_channel : int | None
-        The channel index (starting at 0).
-        Index of the channel containing EDF+ annotations.
-        -1 corresponds to the last channel.
-        If None, the annotation channel is not used.
-        Note: this is overruled by the annotation file if specified.
-
-    hpts : str | None
-        Path to the hpts file containing electrode positions.
-        If None, sensor locations are (0,0,0).
+def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
+    """Extracts all the information from the EDF+,BDF file"""
 
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
-
-    Returns
-    -------
-    info : instance of Info
-        The measurement info.
-    edf_info : dict
-        A dict containing all the EDF+,BDF  specific parameters.
-    """
-
-    info = Info()
-    info['file_id'] = fname
-    # Add info for fif object
-    info['meas_id'] = None
-    info['projs'] = []
-    info['comps'] = []
-    info['bads'] = []
-    info['acq_pars'], info['acq_stim'] = None, None
+    if eog is None:
+        eog = []
+    if misc is None:
+        misc = []
+    info = _empty_info()
     info['filename'] = fname
-    info['ctf_head_t'] = None
-    info['dev_ctf_t'] = []
-    info['dig'] = None
-    info['dev_head_t'] = None
-    info['proj_id'] = None
-    info['proj_name'] = None
-    info['experimenter'] = None
-    info['line_freq'] = None
-    info['subject_info'] = None
 
     edf_info = dict()
     edf_info['annot'] = annot
@@ -436,8 +362,8 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
         assert(fid.tell() == 0)
         fid.seek(8)
 
-        _ = fid.read(80).strip()  # subject id
-        _ = fid.read(80).strip()  # recording id
+        fid.read(80).strip().decode()  # subject id
+        fid.read(80).strip().decode()  # recording id
         day, month, year = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
         hour, minute, sec = [int(x) for x in re.findall('(\d+)',
@@ -445,32 +371,45 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
         date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
         info['meas_date'] = calendar.timegm(date.utctimetuple())
 
-        edf_info['data_offset'] = header_nbytes = int(fid.read(8))
+        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
         subtype = fid.read(44).strip().decode()[:5]
-        edf_info['subtype'] = subtype
+        if len(subtype) > 0:
+            edf_info['subtype'] = subtype
+        else:
+            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()
 
-        edf_info['n_records'] = n_records = int(fid.read(8))
+        edf_info['n_records'] = n_records = int(fid.read(8).decode())
         # record length in seconds
-        edf_info['record_length'] = record_length = float(fid.read(8))
-        info['nchan'] = int(fid.read(4))
-        if n_eeg is None:
-            n_eeg = info['nchan']
+        record_length = float(fid.read(8).decode())
+        if record_length == 0:
+            edf_info['record_length'] = record_length = 1.
+            warnings.warn('Header information is incorrect for record length. '
+                          'Default record length set to 1.')
+        else:
+            edf_info['record_length'] = record_length
+        info['nchan'] = nchan = int(fid.read(4).decode())
         channels = list(range(info['nchan']))
-        ch_names = [fid.read(16).strip().decode() for _ in channels]
-        _ = [fid.read(80).strip() for _ in channels]  # transducer type
-        units = [fid.read(8).strip().decode() for _ in channels]
+        ch_names = [fid.read(16).strip().decode() for ch in channels]
+        for ch in channels:
+            fid.read(80)  # transducer
+        units = [fid.read(8).strip().decode() for ch in channels]
         for i, unit in enumerate(units):
             if unit == 'uV':
-                units[i] = -6
-            elif unit == 'V':
-                units[i] = 0
+                units[i] = 1e-6
             else:
                 units[i] = 1
-        physical_min = np.array([float(fid.read(8)) for _ in channels])
-        physical_max = np.array([float(fid.read(8)) for _ in channels])
-        digital_min = np.array([float(fid.read(8)) for _ in channels])
-        digital_max = np.array([float(fid.read(8)) for _ in channels])
-        prefiltering = [fid.read(80).strip().decode() for _ in channels][:-1]
+        edf_info['units'] = units
+        physical_min = np.array([float(fid.read(8).decode())
+                                 for ch in channels])
+        edf_info['physical_min'] = physical_min
+        physical_max = np.array([float(fid.read(8).decode())
+                                 for ch in channels])
+        digital_min = np.array([float(fid.read(8).decode())
+                                for ch in channels])
+        edf_info['digital_min'] = digital_min
+        digital_max = np.array([float(fid.read(8).decode())
+                                for ch in channels])
+        prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1]
         highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                              for filt in prefiltering])
         lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
@@ -485,12 +424,11 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
             elif highpass[0] == 'DC':
                 info['highpass'] = 0.
             else:
-                info['highpass'] = int(highpass[0])
+                info['highpass'] = float(highpass[0])
         else:
-            info['highpass'] = np.min(highpass)
-            warnings.warn('%s' % ('Channels contain different highpass'
-                                  + 'filters. Highest filter setting will'
-                                  + 'be stored.'))
+            info['highpass'] = float(np.min(highpass))
+            warnings.warn('Channels contain different highpass filters. '
+                          'Highest filter setting will be stored.')
 
         if lowpass.size == 0:
             info['lowpass'] = None
@@ -498,109 +436,64 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
             if lowpass[0] == 'NaN':
                 info['lowpass'] = None
             else:
-                info['lowpass'] = int(lowpass[0])
+                info['lowpass'] = float(lowpass[0])
         else:
-            info['lowpass'] = np.min(lowpass)
+            info['lowpass'] = float(np.min(lowpass))
             warnings.warn('%s' % ('Channels contain different lowpass filters.'
                                   ' Lowest filter setting will be stored.'))
-        n_samples_per_record = [int(fid.read(8)) for _ in channels]
-        if np.unique(n_samples_per_record).size != 1:
-            edf_info['n_samps'] = np.array(n_samples_per_record)
-            if not preload:
-                raise RuntimeError('%s' % ('Channels contain different'
-                                           'sampling rates. '
-                                           'Must set preload=True'))
-        n_samples_per_record = n_samples_per_record[0]
-        fid.read(32 * info['nchan'])  # reserved
+        # number of samples per record
+        n_samps = np.array([int(fid.read(8).decode()) for ch in channels])
+        edf_info['n_samps'] = n_samps
+
+        fid.read(32 * info['nchan']).decode()  # reserved
         assert fid.tell() == header_nbytes
 
     physical_ranges = physical_max - physical_min
     cals = digital_max - digital_min
-    info['sfreq'] = n_samples_per_record / float(record_length)
-    edf_info['nsamples'] = n_records * n_samples_per_record
-
-    if info['lowpass'] is None:
-        info['lowpass'] = info['sfreq'] / 2.
 
     # Some keys to be consistent with FIF measurement info
     info['description'] = None
     info['buffer_size_sec'] = 10.
-    info['orig_blocks'] = None
 
-    if edf_info['subtype'] == '24BIT':
+    if edf_info['subtype'] in ('24BIT', 'bdf'):
         edf_info['data_size'] = 3  # 24-bit (3 byte) integers
     else:
         edf_info['data_size'] = 2  # 16-bit (2 byte) integers
 
-    if hpts and os.path.lexists(hpts):
-        with open(hpts, 'rb') as fid:
-            ff = fid.read().decode()
-        locs = {}
-        temp = re.findall('eeg\s(\w+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)',
-                          ff)
-        temp += re.findall('cardinal\s([\d,.]+)\s(-?[\d,.]+)\s(-?[\d,.]+)\s(-?'
-                           '[\d,.]+)', ff)
-        for loc in temp:
-            coord = np.array(loc[1:], dtype=float)
-            coord = apply_trans(als_ras_trans_mm, coord)
-            locs[loc[0].lower()] = coord
-        trans = get_ras_to_neuromag_trans(nasion=locs['2'], lpa=locs['1'],
-                                          rpa=locs['3'])
-        for loc in locs:
-            locs[loc] = apply_trans(trans, locs[loc])
-        info['dig'] = []
-
-        point_dict = {}
-        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
-        point_dict['ident'] = FIFF.FIFFV_POINT_NASION
-        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
-        point_dict['r'] = apply_trans(trans, locs['2'])
-        info['dig'].append(point_dict)
-
-        point_dict = {}
-        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
-        point_dict['ident'] = FIFF.FIFFV_POINT_LPA
-        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
-        point_dict['r'] = apply_trans(trans, locs['1'])
-        info['dig'].append(point_dict)
-
-        point_dict = {}
-        point_dict['coord_frame'] = FIFF.FIFFV_COORD_HEAD
-        point_dict['ident'] = FIFF.FIFFV_POINT_RPA
-        point_dict['kind'] = FIFF.FIFFV_POINT_CARDINAL
-        point_dict['r'] = apply_trans(trans, locs['3'])
-        info['dig'].append(point_dict)
-
-    else:
-        locs = {}
-    locs = [locs[ch_name.lower()] if ch_name.lower() in locs.keys()
-            else (0, 0, 0) for ch_name in ch_names]
-    sensor_locs = np.array(locs)
-
     # Creates a list of dicts of eeg channels for raw.info
     logger.info('Setting channel info structure...')
     info['chs'] = []
     info['ch_names'] = ch_names
+    tal_ch_name = 'EDF Annotations'
+    if tal_ch_name in ch_names:
+        tal_channel = ch_names.index(tal_ch_name)
+    else:
+        tal_channel = None
+    edf_info['tal_channel'] = tal_channel
+    if tal_channel is not None and stim_channel is not None and not preload:
+        raise RuntimeError('EDF+ Annotations (TAL) channel needs to be '
+                           'parsed completely on loading. You must set '
+                           'the preload parameter to True.')
     if stim_channel == -1:
-        stim_channel = info['nchan']
-    for idx, ch_info in enumerate(zip(ch_names, sensor_locs, physical_ranges,
-                                      cals, units), 1):
-        ch_name, ch_loc, physical_range, cal, unit_mul = ch_info
+        stim_channel = info['nchan'] - 1
+    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
+        ch_name, physical_range, cal = ch_info
         chan_info = {}
         chan_info['cal'] = cal
-        chan_info['logno'] = idx
-        chan_info['scanno'] = idx
-        chan_info['range'] = physical_range * (10 ** unit_mul)
+        chan_info['logno'] = idx + 1
+        chan_info['scanno'] = idx + 1
+        chan_info['range'] = physical_range
         chan_info['unit_mul'] = 0.
         chan_info['ch_name'] = ch_name
         chan_info['unit'] = FIFF.FIFF_UNIT_V
         chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
         chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
         chan_info['kind'] = FIFF.FIFFV_EEG_CH
-        chan_info['eeg_loc'] = ch_loc
         chan_info['loc'] = np.zeros(12)
-        chan_info['loc'][:3] = ch_loc
-        if idx > n_eeg:
+        if ch_name in eog or idx in eog or idx - nchan in eog:
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['kind'] = FIFF.FIFFV_EOG_CH
+        if ch_name in misc or idx in misc or idx - nchan in misc:
             chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
             chan_info['kind'] = FIFF.FIFFV_MISC_CH
         check1 = stim_channel == ch_name
@@ -608,32 +501,34 @@ def _get_edf_info(fname, n_eeg, stim_channel, annot, annotmap, tal_channel,
         check3 = info['nchan'] > 1
         stim_check = np.logical_and(np.logical_or(check1, check2), check3)
         if stim_check:
-            chan_info['range'] = 1
-            chan_info['cal'] = 1
-            chan_info['unit_mul'] = 0
             chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
             chan_info['unit'] = FIFF.FIFF_UNIT_NONE
             chan_info['kind'] = FIFF.FIFFV_STIM_CH
             chan_info['ch_name'] = 'STI 014'
-            info['ch_names'][idx - 1] = chan_info['ch_name']
+            info['ch_names'][idx] = chan_info['ch_name']
+            units[idx] = 1
             if isinstance(stim_channel, str):
                 stim_channel = idx
+        if tal_channel == idx:
+            chan_info['range'] = 1
+            chan_info['cal'] = 1
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
         info['chs'].append(chan_info)
-    if stim_channel is None:
-        edf_info['stim_channel'] = stim_channel
-    else:
-        edf_info['stim_channel'] = stim_channel - 1
+    edf_info['stim_channel'] = stim_channel
 
-    # TODO: automatic detection of the tal_channel?
-    if tal_channel == -1:
-        edf_info['tal_channel'] = info['nchan'] - 1
+    # sfreq is defined as the maximum sampling rate of the EEG channels
+    picks = pick_types(info, meg=False, eeg=True)
+    if len(picks) == 0:
+        edf_info['max_samp'] = max_samp = n_samps.max()
     else:
-        edf_info['tal_channel'] = tal_channel
+        edf_info['max_samp'] = max_samp = n_samps[picks].max()
+    info['sfreq'] = max_samp / record_length
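+    # e.g. 256 samples per 1-second record gives sfreq = 256.0 Hz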
+    edf_info['nsamples'] = int(n_records * max_samp)
 
-    if tal_channel and not preload:
-        raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be'
-                                   ' parsed completely on loading.'
-                                   'Must set preload=True'))
+    if info['lowpass'] is None:
+        info['lowpass'] = info['sfreq'] / 2.
 
     return info, edf_info
 
@@ -645,13 +540,10 @@ def _read_annot(annot, annotmap, sfreq, data_length):
     ----------
     annot : str
         Path to annotation file.
-
     annotmap : str
         Path to annotation map file containing mapping from label to trigger.
-
     sfreq : float
         Sampling frequency.
-
     data_length : int
         Length of the data file.
 
@@ -681,8 +573,8 @@ def _read_annot(annot, annotmap, sfreq, data_length):
     return stim_channel
 
 
-def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
-                 annotmap=None, tal_channel=None, hpts=None,
+def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
+                 stim_channel=-1, annot=None, annotmap=None,
                  preload=False, verbose=None):
     """Reader function for EDF+, BDF conversion to FIF
 
@@ -690,44 +582,47 @@ def read_raw_edf(input_fname, n_eeg=None, stim_channel=-1, annot=None,
     ----------
     input_fname : str
         Path to the EDF+,BDF file.
-
-    n_eeg : int | None
-        Number of EEG electrodes.
-        If None, all channels are considered EEG.
-
+    montage : str | None | instance of Montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Values should correspond to the electrodes in the
+        EDF file. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Values should correspond to the electrodes in the
+        EDF file. Default is None.
     stim_channel : str | int | None
         The channel name or channel index (starting at 0).
-        -1 corresponds to the last channel.
+        -1 corresponds to the last channel (default).
         If None, there will be no stim channel added.
-
     annot : str | None
         Path to annotation file.
         If None, no derived stim channel will be added (for files requiring
         annotation file to interpret stim channel).
-
     annotmap : str | None
         Path to annotation map file containing mapping from label to trigger.
         Must be specified if annot is not None.
-
-    tal_channel : int | None
-        The channel index (starting at 0).
-        Index of the channel containing EDF+ annotations.
-        -1 corresponds to the last channel.
-        If None, the annotation channel is not used.
-        Note: this is overruled by the annotation file if specified.
-
-    hpts : str | None
-        Path to the hpts file containing electrode positions.
-        If None, sensor locations are (0,0,0).
-
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
-
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of RawEDF
+        A Raw object containing EDF data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
     """
-    return RawEDF(input_fname=input_fname, n_eeg=n_eeg,
+    return RawEDF(input_fname=input_fname, montage=montage, eog=eog, misc=misc,
                   stim_channel=stim_channel, annot=annot, annotmap=annotmap,
-                  tal_channel=tal_channel, hpts=hpts, preload=preload,
-                  verbose=verbose)
+                  preload=preload, verbose=verbose)
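
A minimal usage sketch for the reader above (the file name and EOG channel
names are hypothetical, for illustration only)::

    >>> from mne.io import read_raw_edf
    >>> raw = read_raw_edf('recording.edf', eog=['EOGL', 'EOGR'],
    ...                    stim_channel=-1, preload=True)  # doctest: +SKIP
    >>> data, times = raw[:, :100]  # doctest: +SKIP
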
diff --git a/mne/io/edf/tests/data/biosemi.hpts b/mne/io/edf/tests/data/biosemi.hpts
index 6e54b9d..ad5957b 100644
--- a/mne/io/edf/tests/data/biosemi.hpts
+++ b/mne/io/edf/tests/data/biosemi.hpts
@@ -43,7 +43,7 @@ eeg Fpz -100 0 -3
 eeg Fp2 -95 31 -3
 eeg AF8 -81 59 -3
 eeg AF4 -87 41 28
-eeg Afz -93 0 36
+eeg AFz -93 0 36
 eeg Fz -72 0 69
 eeg F2 -71 29 64
 eeg F4 -67 55 50
@@ -75,8 +75,8 @@ eeg O2 95 31 -3
 # 3 cardinal landmarks
 # --------------------
 # Nasion
-cardinal 2 -91 0 -42
+cardinal nasion -91 0 -42
 # Left auricular
-cardinal 1 0 -91 -42
+cardinal lpa 0 -91 -42
 # Right auricular
-cardinal 3 0 91 -42
+cardinal rpa 0 91 -42
diff --git a/mne/io/edf/tests/data/test_edf_eeglab.mat b/mne/io/edf/tests/data/test_edf_eeglab.mat
index 6a492ce..7f90737 100644
Binary files a/mne/io/edf/tests/data/test_edf_eeglab.mat and b/mne/io/edf/tests/data/test_edf_eeglab.mat differ
diff --git a/mne/io/edf/tests/data/test_edf_stim_channel.edf b/mne/io/edf/tests/data/test_edf_stim_channel.edf
new file mode 100755
index 0000000..5480005
Binary files /dev/null and b/mne/io/edf/tests/data/test_edf_stim_channel.edf differ
diff --git a/mne/io/edf/tests/data/test_edf_stim_channel.txt b/mne/io/edf/tests/data/test_edf_stim_channel.txt
new file mode 100755
index 0000000..7e053ab
--- /dev/null
+++ b/mne/io/edf/tests/data/test_edf_stim_channel.txt
@@ -0,0 +1,717 @@
+175940.7 -217680.3 176086.4 175779.4 -217793.6 175491.8 -209183 175800.6 175840.9 175384.3 176063.5 -222814.3 -216936.6 176736.7 175402 -219499.4 115678 -220099.2 176235.8 176591 -212093 176486.8 176489.3 -218446.2 0 
+175941.1 -217680.8 176088.4 175777.9 -217794.2 175492.1 -209132.7 175803 175842.6 175384.1 176065.4 -222809.8 -216944.7 176736.3 175402.8 -219524.4 115731.9 -220099.6 176236 176592.2 -212095.6 176487.5 176492.3 -218442.2 0 
+175942.9 -217683.2 176089.5 175777.6 -217806.4 175490.5 -209125.1 175803.3 175843.4 175382.9 176065.4 -222807.4 -216946.8 176736.6 175402.7 -219535.3 115796.1 -220124.5 176235.9 176591.5 -212090.7 176486.5 176493 -218440.8 0 
+175944.8 -217684.8 176091 175778.1 -217821.5 175489.6 -209128.7 175804 175842.6 175382.9 176064.2 -222809.5 -216940.2 176738.1 175401.9 -219527.8 115897.8 -220140.5 176235.4 176591.8 -212068.1 176487.8 176492.9 -218437.9 0 
+175944.7 -217675.6 176092.7 175778.5 -217828.5 175490.8 -209132.7 175806.2 175841.8 175384.7 176064.7 -222812.1 -216936.7 176738.6 175402.4 -219506.7 116032.4 -220142.5 176235.9 176594.4 -212042 176490.9 176494 -218434 0 
+175942.8 -217664 176091.3 175779.1 -217825.6 175491.5 -209133.8 175805 175842.2 175386.3 176065 -222814.5 -216943.9 176736.7 175403.3 -219479.2 116165.2 -220149.7 176236.5 176594.1 -212032.2 176490.7 176493.2 -218435 0 
+175942.7 -217662.9 176088 175779.4 -217819.3 175490.7 -209123.9 175802 175842.3 175386.6 176063.7 -222814.5 -216946.7 176735.8 175402.9 -219449.8 116264.4 -220155.3 176236.5 176590.9 -212032.2 176488.8 176491 -218439.5 0 
+175944.4 -217666.4 176086.9 175778.9 -217814.5 175490.4 -209107.9 175802.2 175841.4 175386.3 176062.6 -222812 -216936.3 176737.3 175402.5 -219411.6 116336.8 -220146.8 176236.3 176589.4 -212027.2 176489 176490.3 -218439.8 0 
+175945.1 -217665.6 176087.6 175778 -217812.1 175490.7 -209082.1 175802.6 175840.8 175384.7 176061.5 -222811 -216926.9 176736.8 175401.9 -219361.6 116400.7 -220148.7 176236.4 176589.1 -212024.1 176488.9 176490 -218436.9 0 
+175943.4 -217663.5 176087.3 175777.3 -217810.9 175490 -209029.4 175800.6 175840.7 175381.8 176060.7 -222811.1 -216925.7 176734.6 175400.5 -219314.6 116454.3 -220170.7 176236.6 176588.7 -212035.8 176486.6 176489.1 -218438 0 
+175941.3 -217666.6 176087.2 175777.5 -217810.4 175489 -208964.9 175800.1 175840.4 175380.6 176061.5 -222812 -216928.4 176734.6 175400.2 -219282.8 116503 -220171.9 176236.5 176589.8 -212055.9 176485.3 176489.3 -218442.9 0 
+175941.5 -217674.3 176088.3 175778.3 -217810.3 175489.6 -208930.9 175802.7 175840.3 175382 176063.4 -222815.4 -216936.5 176735.6 175401.8 -219258.9 116565.2 -220134 176236.1 176591.4 -212064.7 176486.4 176490.3 -218443.1 0 
+175942.9 -217679.8 176088.5 175777.9 -217810.5 175490.8 -208926.2 175803.6 175839.9 175383.5 176063.4 -222816.7 -216943.4 176734.9 175402.5 -219232.8 116616.9 -220097.1 176235.9 176591.5 -212059.2 176486.9 176489.7 -218437 0 
+175943.6 -217680.3 176087.7 175776.5 -217810.9 175490.1 -208910.2 175801.6 175839.3 175384 176061.7 -222813.1 -216934.2 176733.6 175401.5 -219207.5 116637.4 -220089.6 176236.9 176590.7 -212047.5 176485.5 176487.8 -218431.7 0 
+175943 -217678.5 176088.3 175776.3 -217812.3 175489 -208881.3 175801.6 175839.7 175384.2 176061.6 -222809.4 -216912.8 176734.3 175401.6 -219182.5 116687 -220091.9 176238.3 176591.6 -212017.8 176486 176488 -218430.1 0 
+175941.9 -217674.2 176089.5 175776.9 -217815.9 175489.3 -208865.9 175804.2 175840.4 175383.7 176063.4 -222808.6 -216899.9 176736.1 175402.9 -219158.7 116787.6 -220081.1 176238.3 176592.9 -211965.3 176488.3 176489.8 -218431.2 0 
+175941.7 -217669.1 176088.7 175776.8 -217819.5 175489.5 -208854.6 175804.1 175840.1 175382.7 176064.5 -222809.9 -216906.5 176736.8 175402 -219152.1 116843.6 -220063.9 176236.7 176591.6 -211917.7 176487.7 176490.5 -218434.1 0 
+175942.2 -217668.7 176087.3 175776.6 -217819 175488.4 -208825.7 175801.7 175839.3 175382.8 176064.5 -222812.8 -216922.1 176736.6 175400.2 -219161.4 116806.3 -220063 176235.8 176589.8 -211892.9 176485.1 176490.1 -218434.7 0 
+175942.5 -217671.7 176087.7 175777.1 -217816 175487.8 -208785.5 175800.1 175839.2 175383.7 176064.4 -222815.5 -216929.8 176736.8 175401 -219161.8 116751.8 -220084.8 176236.1 176589.6 -211874.2 176485.1 176489.8 -218431.5 0 
+175943.1 -217673.5 176089 175777.8 -217815.4 175489.2 -208750.4 175800.3 175840.3 175383.9 176063.6 -222816 -216927.9 176737.4 175403.1 -219144.9 116761.2 -220106.8 176236.7 176590.1 -211835.9 176486.8 176490.6 -218431.4 0 
+175944 -217674.3 176089.3 175778.4 -217816.3 175490.7 -208734.5 175802.1 175841.7 175383.5 176062 -222815.5 -216927.9 176737 175403.2 -219128.9 116818.6 -220105 176237 176589.8 -211766.5 176487.4 176491.4 -218436.7 0 
+175944.5 -217674.4 176088.2 175778.5 -217816.9 175490.7 -208741.5 175802.3 175841.7 175383.4 176060.8 -222816.1 -216931.2 176735.4 175402 -219133.5 116815.9 -220089 176235.8 176588.3 -211681.9 176486.8 176490 -218440.5 0 
+175943.5 -217674 176087.2 175777.9 -217818.9 175490.1 -208749.3 175800.1 175839.7 175383.3 176061.6 -222813.8 -216929.2 176734.6 175401.2 -219153.4 116702.6 -220083.1 176234.5 176587.7 -211605 176485.7 176488 -218437.1 0 
+175941.2 -217675.1 176087.8 175777.7 -217821.7 175490.9 -208732.9 175799.9 175838.8 175382.8 176064.2 -222807.3 -216923.2 176735.3 175400.9 -219168.6 116596.6 -220089.3 176236.1 176589.8 -211536.6 176486 176489.4 -218430 0 
+175939.2 -217677.1 176088.6 175778.6 -217822.2 175492 -208687.8 175802.6 175840.4 175382.7 176065.3 -222805.1 -216925 176736 175400.9 -219178.3 116621.8 -220097.3 176238.7 176592 -211472 176488 176492 -218425.9 0 
+175939.9 -217677.9 176088 175779.4 -217818.9 175491.2 -208607 175802.9 175842.1 175383.4 176064.5 -222808.1 -216935.1 176735.9 175400.6 -219189.1 116753.6 -220103 176238.5 176591.2 -211415.9 176488.8 176491.7 -218425.8 0 
+175942.2 -217680.1 176087 175779.1 -217810.7 175489.3 -208483.9 175799.7 175841.6 175384.1 176063.7 -222808.1 -216940.1 176735.3 175400.3 -219189.3 116911.1 -220102.3 176236.8 176588.2 -211358.8 176486.3 176489 -218428.5 0 
+175942.8 -217683.8 176086.6 175778.4 -217803.5 175488.4 -208350.5 175798 175840.3 175383.8 176062.5 -222805 -216932.6 176734.8 175400.6 -219170.4 117041.5 -220100.1 176235.9 176586.6 -211280.7 176483.9 176487.5 -218433.9 0 
+175941.9 -217683.3 176087.4 175777.3 -217805.8 175487.8 -208253.2 175800.6 175839.1 175382.8 176061.4 -222803.8 -216920 176734.7 175400.6 -219145.2 117115.8 -220109.8 176236 176588.6 -211184.8 176485.1 176488.2 -218437.7 0 
+175942.9 -217678.3 176088.8 175776.7 -217815.6 175487.6 -208196.7 175804.1 175838.1 175382.4 176061.9 -222805.4 -216913.4 176735.2 175400.7 -219130.3 117158.2 -220125.5 176236.6 176592.1 -211092 176487.3 176489.7 -218435.8 0 
+175945.8 -217675.4 176089 175777.8 -217824.2 175488.5 -208150 175803.5 175838.8 175383.4 176062.7 -222808.8 -216915.8 176735.8 175402.2 -219127 117223.9 -220131.3 176237.1 176592.7 -211015.8 176486.2 176490 -218432.4 0 
+175945.5 -217677.7 176088.2 175778.6 -217826.7 175489.2 -208088.1 175800.3 175840.6 175384.6 176061.9 -222811 -216919.7 176736.4 175403.1 -219134.6 117308.6 -220130.5 176236.5 176590.2 -210967.4 176483.6 176489.1 -218432.9 0 
+175941.7 -217683.1 176088.2 175777.1 -217823.7 175489 -208023.9 175799.8 175840.5 175384.4 176060.9 -222807.5 -216917.2 176737 175401.6 -219159.8 117367.4 -220126.3 176235.5 176589.1 -210957.4 176484.7 176489.2 -218435 0 
+175940.4 -217687.9 176089.4 175775.7 -217819.8 175489.2 -207991.8 175802.5 175839 175382.8 176062 -222802.5 -216907.8 176737.3 175400 -219191.5 117427.2 -220109.9 176235.2 176591.5 -210976 176489 176490.4 -218435.2 0 
+175942.3 -217688.8 176089.9 175776.2 -217818.5 175490.1 -207989 175804 175839.3 175381.5 176063.6 -222804.1 -216901.3 176736.8 175400.6 -219208.7 117573.1 -220091.8 176235.2 176593.1 -211003.7 176491.2 176491.2 -218437.1 0 
+175943.6 -217687.9 176088.8 175777.1 -217820 175490.1 -207982.3 175801.9 175841.3 175382.1 176063 -222810.5 -216909.8 176736 175401.8 -219214.8 117797.4 -220096.4 176234.8 176591 -211038.9 176489 176490.4 -218442.5 0 
+175943.7 -217687.8 176087.3 175777.4 -217823.3 175489.5 -207967.5 175799 175841.3 175383.4 176061.3 -222814.4 -216928.2 176735.7 175401.8 -219225.7 117982.7 -220118.1 176234.2 176587.5 -211086.3 176485.4 176488.6 -218443.4 0 
+175943.3 -217685.7 176086.7 175777.3 -217824.5 175489 -207967.2 175799.5 175838.9 175383.3 176061.5 -222814.4 -216938.1 176735.7 175400.9 -219239.1 118066.5 -220131.4 176234 176586.7 -211134.6 176484.3 176487.9 -218436.7 0 
+175941.7 -217681.3 176087.5 175776 -217821.4 175488.9 -207983 175802.2 175837.8 175382.2 176062.6 -222815.3 -216936 176735.8 175400.9 -219238.5 118093.8 -220132.7 176234.6 176589.2 -211161 176485.8 176489.8 -218430.4 0 
+175940.5 -217682.4 176088.3 175774.6 -217818 175489 -207984.8 175802.9 175839.8 175382.5 176062.6 -222819.1 -216929.6 176736 175401.7 -219222.2 118113.4 -220137.8 176235.8 176591 -211152.5 176486.5 176491.5 -218430.2 0 
+175941 -217693 176087.5 175774.9 -217817.2 175489.1 -207950.2 175801.3 175841.9 175382.9 176061.7 -222820.2 -216923.3 176736 175401.6 -219214.8 118148.9 -220150.4 176236.1 176589.8 -211123.4 176485.3 176490.2 -218433.2 0 
+175942.1 -217706 176086.4 175776.3 -217816.9 175489 -207898.5 175800.7 175841.8 175381.7 176061.3 -222814.2 -216921.2 176736.3 175400.9 -219228.9 118218.1 -220156.7 176235.5 176588.2 -211098.5 176485.3 176488.6 -218434.1 0 
+175942.5 -217711.3 176086.8 175777 -217816.6 175489.3 -207858.8 175802.5 175840.9 175380.8 176062.3 -222807.2 -216923.3 176736.5 175401 -219243.2 118296.9 -220144.8 176235.9 176588.8 -211081.1 176487 176489.3 -218431.6 0 
+175941.1 -217708.2 176087.4 175777.2 -217817.7 175490 -207833.4 175804.5 175840.6 175382.1 176063.5 -222805.7 -216920.4 176736 175401.4 -219245.4 118334.3 -220119.9 176237 176590.5 -211064.5 176487.5 176490 -218430 0 
+175939.4 -217707.1 176087.1 175777.1 -217819.7 175489.5 -207811.8 175804.7 175840.4 175383.7 176063.5 -222808.2 -216910.5 176734.8 175401 -219256.7 118335.4 -220098.5 176237 176591.3 -211066.2 176487.1 176489.6 -218433.4 0 
+175940.8 -217711.1 176086.6 175776.5 -217818.6 175487.8 -207777.8 175803.8 175840 175384 176063.1 -222811.7 -216905.1 176734.1 175400.6 -219295 118353.1 -220094.6 176235.5 176591.1 -211102 176487.7 176489.5 -218438.9 0 
+175943.4 -217712 176087.1 175775.9 -217811.6 175487.3 -207719.9 175803.8 175840.1 175383.4 176062.5 -222813.3 -216910 176734.3 175400.3 -219345.1 118410.4 -220113.5 176234.1 176590.5 -211139.3 176488.1 176490.3 -218439.7 0 
+175943.3 -217707.3 176088.2 175776.1 -217802.9 175489.3 -207655.2 175804.2 175840.8 175382.8 176061.8 -222809.8 -216913.3 176735.3 175399.5 -219377.8 118496.5 -220143.3 176234.5 176590.8 -211122.5 176487.8 176491.2 -218435 0 
+175942.1 -217704.4 176088.2 175776.7 -217799 175491.1 -207610.5 175804.1 175841.2 175382.9 176061.5 -222803.4 -216906.9 176736.5 175399.1 -219388.4 118589 -220156.3 176235.6 176592.1 -211045.3 176488.1 176491.3 -218431 0 
+175942 -217703.9 176086.9 175777.4 -217803.8 175490.2 -207586.5 175803 175840.4 175382.8 176060.7 -222801.5 -216904.5 176736.3 175399.8 -219399.3 118658.9 -220140.2 176235 176592.3 -210955.8 176487.5 176490.1 -218430.3 0 
+175942.3 -217698.8 176085 175777.1 -217816.3 175488.2 -207564 175800.8 175839 175381.8 176059.5 -222804 -216912.3 176735.2 175400.1 -219424.4 118709.3 -220116.1 176234 176591.1 -210886.5 176485.7 176488 -218430.8 0 
+175943 -217689.6 176084.6 175775.3 -217828.6 175488 -207537.6 175799 175838.3 175381.1 176059.7 -222804 -216912.1 176734.3 175399.8 -219457 118772.6 -220110 176234.7 176590 -210827.5 176485 176486.5 -218432.1 0 
+175943.3 -217684 176086.2 175775 -217833.1 175489.3 -207532.1 175798.2 175839 175381.2 176060.7 -222800.6 -216899.5 176733.7 175400 -219483.2 118847.2 -220125.2 176235.5 176589.1 -210775.3 176485 176486.8 -218434.2 0 
+175942.2 -217683.5 176087.2 175777.7 -217832 175489.8 -207563.9 175798.1 175840.2 175381.4 176061.3 -222798.8 -216894.8 176733.1 175400.5 -219488.2 118911.7 -220143.8 176235.4 176587.4 -210748.7 176485 176487.6 -218434.1 0 
+175942 -217683.2 176086.4 175779.3 -217832.5 175489.4 -207606.1 175799.8 175841 175382.2 176061.7 -222800.2 -216905.2 176734 175400.8 -219476.7 118977.3 -220144.2 176235.3 176586.7 -210747 176485.6 176488.4 -218432.6 0 
+175943.3 -217681.3 176085.8 175777.3 -217837.7 175488.3 -207622.9 175801.4 175841 175383.2 176061.9 -222802.7 -216914.4 176735.5 175400.8 -219477.2 119081.2 -220130.8 176235.1 176588.3 -210749.1 176486.1 176489.6 -218433.2 0 
+175942.6 -217678.9 176086.3 175775.1 -217842.8 175486.9 -207608.5 175800.7 175840.2 175382.8 176060.9 -222803.1 -216912.5 176735.5 175400.8 -219505.3 119262 -220123.9 176234.8 176589.9 -210743.1 176485.5 176490.6 -218434.5 0 
+175940.9 -217676 176087.4 175775.6 -217841.4 175486.9 -207579.2 175800.5 175839.1 175381.7 176059.8 -222800 -216903.6 176734.2 175401.3 -219545.5 119503.9 -220121.2 176235.5 176590 -210728.1 176485.4 176490.9 -218433.9 0 
+175941.2 -217671.4 176088.1 175776.1 -217834.8 175487.9 -207552.2 175803 175838.4 175381.7 176060.4 -222797.5 -216891.7 176733.5 175400.8 -219572.9 119711.6 -220109.1 176236.5 176589.9 -210713.1 176486.4 176490.7 -218432.5 0 
+175942.2 -217669.4 176087.2 175774.3 -217832.4 175487.9 -207533.8 175803 175839 175382.1 176061.3 -222800.1 -216877.7 176733.5 175399 -219579.2 119827.5 -220094.5 176235.5 176588.7 -210722.9 176486 176489.3 -218432.6 0 
+175942.7 -217673.2 176085.5 175772.2 -217835.7 175487.4 -207526.8 175799.4 175839.8 175381.5 176060.5 -222804.1 -216862.3 176733.9 175398 -219576.7 119907.7 -220094.6 176234.1 176586.4 -210766.3 176484.2 176486.5 -218433.8 0 
+175943.5 -217676.8 176085.1 175772.5 -217838.3 175488.2 -207536.3 175799.1 175839.3 175381.4 176060.4 -222803.8 -216849.4 176734.6 175398.6 -219581.8 120033.8 -220113.6 176234.5 176587 -210815.3 176484.5 176486.2 -218432.2 0 
+175943 -217676.6 176086.3 175774.8 -217838.6 175489 -207558.9 175803.1 175837.7 175382.6 176062.2 -222802 -216850.5 176735.2 175399.2 -219605.6 120223.4 -220133.6 176235.4 176590 -210849.9 176486.9 176489.5 -218428 0 
+175941.4 -217675.6 176087.2 175776 -217838.9 175488.4 -207579.1 175804.5 175836.8 175383.3 176063.6 -222803.6 -216866.2 176735.3 175399.7 -219638.8 120425.8 -220137 176234.3 176590.5 -210874.1 176487.7 176491.2 -218428.4 0 
+175941.4 -217674.9 176086.5 175775.7 -217840.2 175487.9 -207577.3 175802.1 175837.3 175382.8 176063.5 -222805.9 -216881.3 176735.5 175399.9 -219647.9 120579.6 -220127.8 176233 176588.6 -210880.8 176485.8 176489.2 -218434.1 0 
+175941.9 -217674.8 176084.6 175775.8 -217845.6 175487.8 -207532.8 175800.2 175838.2 175382 176062.6 -222804.9 -216888.9 176735.3 175398.8 -219621 120687.6 -220119.5 176233.3 176587.6 -210859.1 176484 176487 -218436.2 0 
+175940.4 -217675.7 176083.2 175775.8 -217853.7 175487.2 -207441.1 175800.5 175838.6 175381.2 176061.1 -222801.3 -216895.3 176734.4 175398.2 -219583.5 120804.2 -220121.3 176234.1 176587.7 -210829.2 176484.7 176486.8 -218433 0 
+175938.7 -217675.2 176084 175775.1 -217851.5 175486.7 -207330.3 175801.4 175839.3 175380.9 176060.2 -222798.2 -216904.6 176733.8 175399.7 -219546.6 120939.7 -220133.8 176234.7 176588.5 -210810.3 176486.2 176487.9 -218432.2 0 
+175939.1 -217673.6 176086.1 175775.6 -217834.4 175487.2 -207235.2 175800.8 175839.6 175380.9 176060.2 -222797.9 -216913.8 176734 175400.7 -219503.5 121042 -220142.4 176235.4 176588.9 -210780.1 176485.8 176488.4 -218434.9 0 
+175940.4 -217674.8 176086 175776.8 -217818.2 175488 -207164.3 175798.9 175838.3 175380.8 176059.7 -222798.1 -216917.5 176733.9 175399.7 -219460.5 121081.9 -220138.6 176235.1 176588 -210721.4 176484.2 176486.8 -218436.1 0 
+175941.1 -217676.1 176084.1 175776.6 -217815.1 175488.3 -207103.1 175798 175837.5 175381.2 176058.9 -222795.5 -216912.9 176733.7 175398.8 -219422.9 121101.2 -220136.5 176234 176587.5 -210660 176484.1 176485.6 -218436 0 
+175941.2 -217674.5 176084.1 175776.2 -217819.6 175488.6 -207034.5 175798.8 175839.1 175382.1 176059.3 -222793 -216898.6 176734.4 175399.3 -219381.7 121153.1 -220144.2 176234.3 176588.6 -210624.5 176486.2 176487.6 -218436.2 0 
+175941.3 -217674 176085.8 175776.8 -217822.6 175488.3 -206959.6 175799.4 175840.9 175381.9 176060.6 -222795.3 -216880 176734.8 175399.9 -219341 121237.5 -220151.4 176235.8 176588.6 -210623.9 176487.4 176489.8 -218436.2 0 
+175940.4 -217675.1 176085.3 175776.4 -217822.6 175487.1 -206898.5 175799 175840.5 175381.1 176061.5 -222798.7 -216876.3 176733.4 175399.6 -219318.1 121319.1 -220152.5 176235 176586.6 -210654.3 176485.6 176488.5 -218436.2 0 
+175939.3 -217671.8 176083.7 175774.7 -217819.9 175486.6 -206862.3 175799.9 175839.5 175381.6 176061.8 -222795.7 -216893.8 176732.5 175399.1 -219314.4 121391.4 -220152.4 176233.3 176586.9 -210686.8 176484.8 176487.4 -218435.1 0 
+175939.7 -217667.5 176084.7 175774.2 -217814.2 175487.7 -206830.6 175801.7 175839.3 175382.8 176062.3 -222789.7 -216909.7 176733.9 175399.1 -219322.7 121496.5 -220150.5 176233.6 176589.5 -210694.1 176487.5 176488.5 -218432.3 0 
+175940.8 -217668.6 176086.8 175774.8 -217811.1 175487.8 -206779.6 175800.6 175838.7 175382.3 176061.8 -222791.2 -216910.9 176735.2 175398.6 -219350.6 121649 -220137.6 176234.2 176589.7 -210683.5 176488.1 176488.5 -218430.5 0 
+175941.1 -217671.1 176087 175774.5 -217817.4 175486.1 -206714.2 175797.4 175837.5 175380.9 176060.1 -222799.7 -216910.5 176735 175398 -219391.8 121781.8 -220111.6 176233.2 176587.5 -210666.3 176485.5 176487 -218431.8 0 
+175940.1 -217671.7 176085.5 175774 -217826.4 175485.6 -206640.3 175796.2 175837.3 175380.8 176059 -222806.8 -216916 176733.8 175398.3 -219417.5 121831 -220094 176232.3 176586.5 -210645.1 176482.7 176487.1 -218436.1 0 
+175939.1 -217672.6 176084.7 175774.6 -217825.8 175486.1 -206538.1 175797.6 175837.4 175380.8 176059.6 -222806.3 -216919.4 176732.1 175398.9 -219424.5 121834.5 -220102.7 176232.3 176586.8 -210633.5 176481.9 176488.7 -218442.4 0 
+175939.8 -217671.1 176085.7 175775.4 -217817.7 175485.6 -206412.7 175799.6 175837.3 175379.8 176060.9 -222799.8 -216920.3 176731.4 175398.6 -219430.6 121868.3 -220119.3 176232 176587.3 -210651.8 176483.1 176489.8 -218446.3 0 
+175941.4 -217665.5 176086.6 175775.7 -217813.6 175486.2 -206316.3 175800.7 175837.8 175379.8 176061.8 -222796.8 -216923.3 176733 175398 -219431.9 121931.1 -220123 176232.5 176588.3 -210705.5 176485 176489.3 -218443.5 0 
+175941.6 -217662 176085.2 175775.2 -217815.3 175488.6 -206275.7 175800.3 175838.3 175381.2 176061.5 -222801.7 -216922.7 176734.8 175398.7 -219421.5 121991.7 -220127.6 176234.5 176589.3 -210761.2 176485.6 176487.9 -218437.2 0 
+175940.1 -217666.5 176083.4 175774.3 -217815.2 175488.8 -206252.1 175799.5 175837.8 175381.8 176060.5 -222808.2 -216917 176734.4 175399.9 -219403.2 122055.8 -220141.2 176235.2 176588.9 -210766.5 176485.1 176487.2 -218434.5 0 
+175938.9 -217674.9 176084 175774.7 -217812.4 175486.8 -206195.5 175799.6 175837.7 175380.5 176060.8 -222810.2 -216914.6 176733.4 175399.5 -219379.2 122092.8 -220142.5 176233.8 176588.8 -210721.7 176485 176487.5 -218434.6 0 
+175939.8 -217678.3 176085.9 175776 -217812.3 175486.1 -206102.6 175800 175838.1 175379.1 176062.4 -222807.7 -216916 176733.4 175398.3 -219359.5 122054.1 -220128.4 176233 176590.1 -210692.5 176485.5 176488.5 -218434.7 0 
+175940.9 -217677.1 176085.4 175775.6 -217813.8 175486.6 -206012.3 175798.9 175837.5 175378.3 176062.3 -222804.6 -216915.4 176733.2 175397.9 -219351.1 121976.7 -220122.3 176233.4 176590.3 -210719.9 176484.8 176488.5 -218438 0 
+175940 -217679.7 176083.3 175773.9 -217811.4 175486.7 -205953.4 175798 175836.8 175378.6 176060.5 -222803.3 -216913.5 176732.5 175398.2 -219346.8 121907.7 -220129.6 176233.1 176588.4 -210763.1 176483.8 176488 -218442.8 0 
+175939.7 -217687 176084.2 175773 -217805.6 175487.2 -205918.9 175799.2 175838 175380.4 176059.1 -222802.7 -216914.1 176732.7 175398 -219350.2 121830.1 -220138.2 176232.6 176586.9 -210773 176484.8 176488.8 -218442.7 0 
+175941.2 -217692.8 176086.9 175773.5 -217802.6 175487.9 -205889.7 175800.1 175840 175382.1 176058.9 -222803.5 -216917.9 176733.1 175398.3 -219369.6 121737 -220140 176232.8 176585.9 -210766.5 176485.3 176489.2 -218437.9 0 
+175941.1 -217694.2 176087.1 175774.3 -217806.3 175487.4 -205860.1 175799.8 175840.5 175382 176059.4 -222805.2 -216920.5 176733.5 175399.5 -219398.1 121642.2 -220134.1 176233.2 176585.6 -210781.5 176483.5 176487.4 -218431.7 0 
+175940 -217692.6 176085.4 175774.3 -217811.7 175486.9 -205842.3 175800.1 175838.7 175381.2 176060.2 -222804.7 -216918.7 176734.4 175400.5 -219426.5 121532.4 -220128.3 176233.4 176587.5 -210820.9 176482.5 176486.8 -218425.9 0 
+175940.5 -217692.3 176083.3 175773.6 -217815.4 175486.5 -205845.1 175799.6 175836 175379.9 176060.2 -222803.8 -216919.8 176733.7 175399.9 -219454.7 121417.9 -220132.1 176233.3 176589.5 -210874.3 176482.3 176487.3 -218425.6 0 
+175941.4 -217695.2 176081.5 175773.1 -217818.7 175485.8 -205854.4 175798 175834.6 175378.3 176059.1 -222803.4 -216928.5 176731.7 175398.5 -219489.7 121381.2 -220143.7 176232.6 176590.1 -210927.5 176482.1 176486.5 -218430.1 0 
+175940.6 -217696.9 176081.8 175773.5 -217817.8 175485.5 -205860.9 175798.5 175835.5 175377.6 176057.9 -222801.9 -216935 176731.2 175397.7 -219533.3 121474.4 -220156.5 176232.6 176589.9 -210954.1 176483.5 176486.8 -218431 0 
+175939.1 -217696 176083.8 175774.5 -217812.6 175485.9 -205871.5 175800.1 175837.8 175377.4 176058.2 -222802.6 -216933.8 176732.5 175398.3 -219574.4 121614.3 -220164.1 176233.5 176587.8 -210942 176484.7 176488.7 -218429.1 0 
+175939.2 -217695.4 176084.5 175775.2 -217810.5 175486.7 -205872.6 175799.8 175839.3 175378.3 176060.2 -222804.4 -216931.8 176733.2 175399.3 -219600.4 121692.6 -220160 176233.8 176585.6 -210906.9 176485.1 176488.5 -218428.7 0 
+175940.6 -217696.2 176084.2 175774.9 -217809.7 175487.2 -205847.1 175799 175838.5 175380.1 176062.2 -222803.1 -216933.8 176732.9 175399.3 -219602.6 121749.5 -220150 176233.7 176586.4 -210866.9 176486.3 176486.8 -218429.8 0 
[287 diff-added rows of raw numeric sample data omitted: each row contains 24 whitespace-separated floating-point channel values followed by a trailing trigger/status column (0, with occasional 100 events)]
+175932.7 -217668.5 176076.4 175766.8 -217783.3 175479.5 -208665.7 175790.6 175829.5 175371.1 176050.9 -222783.4 -216903.4 176723.2 175390.8 -219656.2 121696.1 -220165.7 176224.1 176579.1 -211360 176476.1 176477.4 -218416.7 0 
+175932.4 -217669.5 176077.8 175767 -217781.5 175480.7 -208614.4 175792.5 175828.6 175370.7 176051.4 -222787.8 -216905.5 176725 175391.6 -219619.8 121476.5 -220148.8 176226 176581.4 -211341.6 176477.4 176479.5 -218416.5 0 
+175931.9 -217669.8 176076.5 175766.1 -217781.1 175479 -208545.5 175790.9 175827.7 175370.2 176050.9 -222789.5 -216910.9 176725.2 175390.9 -219577.8 121236.7 -220135 176225.6 176579.8 -211335.8 176475.8 176479.6 -218414 0 
+175932.1 -217672.7 176075.3 175766.4 -217779.6 175477.3 -208475.3 175788.3 175828.9 175370.6 176050.4 -222786.5 -216915.7 176724.4 175389.7 -219547.2 121000.6 -220136.6 176223.6 176577.2 -211362.2 176473.9 176477.6 -218410.6 100 
+175931.5 -217674.5 176076.5 175767.3 -217776.5 175478.3 -208416.3 175788 175831.3 175372.1 176051.5 -222781.9 -216915.1 176724.9 175390.5 -219535.5 120818.3 -220146 176223.3 176578 -211400.7 176474.1 176477.8 -218407.5 0 
+175930.7 -217673.2 176077 175767.8 -217774.1 175479 -208392.5 175788.1 175831.8 175372.5 176052.1 -222779.4 -216910.9 176724.5 175391.4 -219528.9 120711.9 -220151 176223.7 176578.6 -211414.6 176474.4 176479.4 -218407.5 0 
+175931.8 -217671.3 176076.8 175767.5 -217771.6 175478.5 -208401.5 175790 175830.1 175371.4 176051.8 -222779.2 -216904 176723.9 175390.5 -219496.4 120637.1 -220150.6 176224.2 176578.1 -211415.8 176475.8 176480.3 -218408.2 0 
+175934.5 -217670.2 176077.7 175767.3 -217767.9 175479.5 -208419 175794.5 175828.8 175371.2 176052.9 -222781.6 -216895.4 176725.8 175390.4 -219436.1 120508.2 -220147.3 176226 176579.7 -211435.9 176479.6 176480.9 -218406.2 0 
+175935 -217669.9 176077.8 175767.4 -217766.8 175480.9 -208441.7 175795.2 175829.2 175372 176053.7 -222786.5 -216892.2 176727.1 175392.2 -219383.3 120282.1 -220147.4 176227 176580.3 -211470.4 176480.1 176480.3 -218404.8 0 
+175932.9 -217668.9 176076.4 175765.8 -217772.9 175479.9 -208473.6 175791.1 175829.7 175371.3 176052.6 -222787 -216897.5 176725.6 175392.8 -219352.1 120007.4 -220157.4 176226.2 176578 -211500.9 176475.3 176479.1 -218406.4 0 
+175930.9 -217667.7 176076.9 175764 -217781.3 175478.6 -208505.6 175789.6 175829.5 175370 176052.3 -222779.5 -216903 176724.2 175391.6 -219331.3 119759.5 -220165.4 176226 176577.6 -211525.9 176472.7 176479.6 -218406.6 0 
+175929.3 -217669.2 176078.1 175765.1 -217781.7 175479.2 -208527.3 175791.5 175829.1 175370.3 176053.1 -222773.1 -216903.9 176724.6 175390.5 -219325.3 119573.7 -220155.5 176226 176580.3 -211560.9 176475.1 176481.2 -218404.8 0 
+175928.6 -217670.5 176076.4 175766.7 -217774.6 175479.4 -208531.3 175790.9 175828.8 175371.9 176052.9 -222775.3 -216904.7 176725.1 175390.3 -219343.2 119452.9 -220140.7 176224.8 176580.7 -211616.2 176477 176480.8 -218405 0 
+175930 -217665.6 176073.5 175766.4 -217770 175478.2 -208523 175787.6 175827.8 175373.2 176051.6 -222781.6 -216904.4 176723.8 175390 -219371.8 119373.2 -220146.4 176224 176577.5 -211665.9 176476.4 176478.7 -218406.3 0 
+175931.2 -217657 176073.8 175766.1 -217769 175477.1 -208519.1 175787.3 175826.8 175372.7 176050.7 -222784.4 -216898.8 176721.9 175389.7 -219389.7 119317.5 -220164.8 176224.5 176575.8 -211683.1 176475.4 176477.5 -218406.5 0 
+175930.2 -217652.7 176076.8 175766.8 -217765.4 175476.8 -208520.6 175791 175827.3 175371.5 176050.7 -222782.6 -216891.3 176722 175389.5 -219394.1 119294 -220167.8 176224.9 176577.3 -211691.5 176475 176477.9 -218409.2 0 
+175928.9 -217654.2 176078.5 175767.1 -217760.7 175477.2 -208520.2 175793.5 175829.1 175371.9 176051.1 -222782.3 -216889.7 176724.2 175389.4 -219397.9 119261.7 -220157.9 176224.4 176578.3 -211711.2 176474.9 176478.6 -218416.2 0 
+175928.4 -217655.2 176078.4 175767.2 -217760.2 175477.9 -208529.5 175792.9 175830.5 175372.4 176051.1 -222785.5 -216895.9 176725.5 175389.4 -219403.3 119137.3 -220155.2 176224.3 176577.6 -211717.7 176475.2 176478.4 -218418 0 
+175928.5 -217652.5 176078 175767.9 -217762.5 175478.9 -208562.2 175792.8 175829.8 175371.6 176051.1 -222786.7 -216905.1 176724.9 175388.9 -219406.8 118918.1 -220158.1 176224.8 176577.5 -211702.3 176476.6 176478 -218409.1 0 
+175930.2 -217651.9 176076.9 175767.8 -217765.6 175478.9 -208606.3 175792.4 175828.3 175371 176051.3 -222786.7 -216910.5 176723.4 175387.5 -219419.3 118691.5 -220157.1 176224.5 176577.9 -211678.4 176476.6 176477.2 -218400.8 0 
+175932 -217655 176074.7 175765.6 -217770.5 175477 -208639.6 175789 175827.7 175370.7 176050.6 -222789 -216908.1 176722 175385.9 -219448.9 118523.3 -220152.8 176222.6 176576.7 -211635.9 176473.5 176476.3 -218402.2 0 
+175931.4 -217654.5 176073.9 175763.7 -217773 175475.6 -208653.8 175787.4 175827.7 175370 176050.3 -222789.6 -216900.4 176721.7 175386 -219480.9 118400.6 -220146.1 176221.2 176576.2 -211569 176471.2 176476.4 -218404.3 0 
+175930 -217651.5 176075.8 175764.8 -217769.6 175477.3 -208656.1 175790.5 175828.1 175370.4 176051.1 -222787.7 -216895.4 176722.4 175388.1 -219501.7 118276.6 -220142.9 176221.6 176577.9 -211502.2 176472.3 176477.7 -218399.7 0 
+175930.2 -217653.8 176077.3 175766.2 -217766.5 175479.1 -208662.6 175792.3 175828.7 175371.4 176051.6 -222787.8 -216896.6 176722.6 175389.5 -219516.5 118129.1 -220147.3 176222.3 176578.8 -211458.2 176473.1 176478.9 -218394.6 0 
+175931.6 -217658 176076.1 175765.1 -217768.7 175478.6 -208683.4 175790.3 175828.1 175371.2 176051 -222787.8 -216897.7 176722.3 175389.9 -219539 117960.3 -220146.3 176222.5 176578.3 -211439.7 176472.7 176478.7 -218394.2 0 
+175932.9 -217657.2 176074.1 175764.2 -217774.5 175477 -208701.7 175789.3 175827.4 175370.9 176050.4 -222785.7 -216897.3 176723 175390.9 -219574.8 117782.5 -220134.5 176223.9 176578.7 -211433.2 176473.4 176477.4 -218398 0 
+175933.7 -217653.6 176073.7 175765.4 -217782.5 175476.5 -208692.8 175790.6 175828.3 175371.2 176050.1 -222784.4 -216901.1 176724.3 175392 -219612.9 117616 -220130.2 176225.3 176579.6 -211413.3 176474.9 176477.5 -218404.6 0 
+175932.5 -217652.9 176073.9 175766.3 -217789.2 175477.2 -208657.6 175791 175829.2 175371.2 176049.8 -222784.6 -216907.1 176723.7 175391.5 -219637.3 117465.2 -220140.5 176224.4 176579.1 -211371.3 176475.2 176479.1 -218410.4 0 
+175930.5 -217658.1 176073.8 175766 -217789.7 175477.8 -208622 175789.5 175828.8 175370.9 176049.5 -222783.7 -216909 176721.9 175390 -219641.5 117310.9 -220151.1 176222.5 176577.4 -211329.3 176474.3 176479.4 -218410.1 0 
+175930.1 -217664.4 176074.3 175764.9 -217786.3 175476.9 -208605.3 175788.9 175827.8 175370.4 176048.8 -222780.4 -216906.8 176721.6 175388.8 -219628.9 117134.1 -220154.4 176222.4 176576.8 -211312.5 176473.8 176478 -218403.3 0 
+175930.1 -217664.2 176075.3 175763.6 -217783.9 175475.9 -208603.9 175790.2 175827.2 175369.3 176048.1 -222777.4 -216907.1 176722.7 175388 -219612.1 116924 -220153.7 176223.5 176577.6 -211318.4 176474.3 176476.5 -218398.8 0 
+175929.7 -217661 176075.1 175763.5 -217783.5 175475.9 -208615.4 175790 175827.2 175368.3 176048.3 -222779 -216914.4 176723.3 175388.4 -219604.7 116676.8 -220151 176223.3 176577.7 -211315.5 176474.1 176475.2 -218403.8 0 
+175930.6 -217660.8 176074.7 175764.4 -217784.1 175476.6 -208655.7 175787.5 175827.4 175368.5 176049 -222783.2 -216922.2 176723.4 175389.5 -219613 116398.4 -220149.6 176222.2 176577.2 -211271.2 176473 176475.2 -218411.1 0 
+175932.3 -217659.5 176075.7 175764.7 -217784.4 175477.6 -208723.8 175786.3 175827.8 175369.4 176049.5 -222785.7 -216922.5 176723.6 175389 -219635 116105 -220151.8 176222.9 176577.9 -211191.2 176473.4 176477 -218410.8 0 
+175931.9 -217655.3 176077 175764.3 -217785 175478.1 -208791.2 175787.7 175828.4 175370 176049.7 -222785.3 -216915.8 176723.6 175387.6 -219658.4 115834.5 -220153.6 176224.5 176579.2 -211126.1 176474.5 176478 -218405.8 0 
+175930.2 -217653.5 176076.7 175764.2 -217786.1 175476.9 -208839.1 175789.1 175828.7 175369.9 176050 -222782.5 -216908.8 176723 175387.5 -219676.9 115623.3 -220151.9 176224.2 176579.4 -211118.3 176474.4 176477.4 -218402.9 0 
+175929.4 -217655.2 176076.1 175764.4 -217785.5 175475.8 -208861 175789.8 175828.5 175370.5 176050.5 -222780.2 -216908.3 176722.7 175387.9 -219692.7 115457.6 -220148.4 176223 176578.8 -211158.1 176475 176478.2 -218401 0 
+175929.5 -217657.6 176076 175764.5 -217783.3 175476.5 -208851.3 175789.4 175828.2 175371.6 176051.2 -222781.8 -216913 176723.1 175387.3 -219688.8 115291.4 -220144.5 176222.5 176578.5 -211207.3 176475.8 176480 -218398.7 0 
+175929.4 -217659.9 176075.3 175764.1 -217781.3 175476.8 -208822.6 175787.2 175827.4 175371.1 176050.2 -222784.4 -216913.9 176723.3 175386.6 -219657.6 115093.1 -220142.2 176222 176577.9 -211235.8 176473.8 176479.8 -218399.4 0 
+175929 -217661.6 176074.7 175763.8 -217779.6 175476.7 -208791.8 175786.3 175826.8 175369.5 176048.9 -222783.4 -216909.8 176722.9 175387.3 -219632.4 114867.3 -220148.1 176222 176577.3 -211235.3 176471.8 176478.5 -218403.8 0 
+175928.8 -217662.7 176074.7 175763.5 -217776.3 175477.6 -208760.3 175788.3 175827.1 175369 176049.1 -222781.2 -216907.7 176722.4 175388.9 -219627 114669.2 -220159.2 176223.2 176577.7 -211219.4 176473.6 176478.3 -218407.4 0 
+175929 -217664.2 176073.8 175763.2 -217771.3 175477.3 -208733.1 175789.2 175827.3 175369.5 176049.4 -222781.1 -216908.2 176722.1 175389.4 -219614.2 114533.2 -220158.3 176223.7 176577.9 -211198.7 176474.8 176477.8 -218407.9 0 
+175929.1 -217665.7 176072.9 175762.8 -217768.2 175475.7 -208714.6 175788.4 175826.9 175369 176048.3 -222781.3 -216904 176722.8 175388.8 -219594.3 114386.8 -220139.6 176222.9 176576.8 -211177.6 176473.3 176476.3 -218407 0 
+175928.3 -217667.3 176073.2 175763.3 -217770 175475.6 -208691.9 175789 175827 175368.3 176047.5 -222780.8 -216894.5 176724.5 175388.6 -219597.5 114145.7 -220119.9 176223.2 176576.1 -211188.4 176473.2 176476.1 -218407.5 0 
+175928.4 -217669.3 176073.9 175764.6 -217771.6 175476.4 -208653.2 175789 175827 175368.8 176047.7 -222781.9 -216892 176724.3 175388.6 -219620.9 113832 -220118.1 176223.7 176575.6 -211249 176473.8 176476.6 -218410.3 0 
+175929.6 -217667.7 176075.1 175765.2 -217767 175477.2 -208604.7 175788.3 175826.4 175369.4 176048.7 -222781.8 -216900.6 176721.7 175388 -219634.3 113499.8 -220131.9 176223.1 176575.4 -211311.3 176473.9 176476.8 -218410.5 0 
+175929.7 -217662.3 176077.4 175764.5 -217760.2 175479.1 -208575.2 175790.2 175826.8 175369.4 176050.2 -222779.3 -216907.4 176720.9 175388.1 -219623.4 113165.4 -220144.4 176222.8 176577.4 -211333.5 176475.5 176478.1 -218406.2 0 
+175929.3 -217658.9 176077.9 175764.7 -217757.2 175479.9 -208596.2 175791.9 175828.1 175369.5 176050.8 -222778.1 -216907 176722.2 175389.2 -219593.3 112858.2 -220153.8 176223.7 176578.9 -211334.2 176476.9 176479.9 -218405.9 0 
+175930.3 -217660.4 176075.5 175766.1 -217758.7 175477.7 -208651.8 175789.6 175828.5 175369.2 176049.6 -222778.3 -216905.6 176721.8 175389.9 -219559.7 112622.4 -220165.3 176224.1 176576.8 -211334.3 176475.1 176480 -218411.6 0 
+175930.9 -217663 176074.3 175766.1 -217763.1 175475.6 -208687.2 175788.3 175827.8 175369.5 176048.8 -222776.3 -216904 176721.1 175389.7 -219535.4 112451 -220162.8 176223.2 176575.7 -211324.3 176473.1 176479.2 -218411.6 0 
+175930.1 -217663.7 176075.5 175764.5 -217767.4 175475.2 -208682.8 175790.9 175827.5 175370.8 176049.8 -222774.8 -216902.2 176722.8 175388.8 -219512.6 112294.1 -220140.2 176222.4 176578.2 -211298.4 176473 176478.3 -218403 0 
+175929.1 -217664.1 176075.4 175763.7 -217767.8 175474.7 -208664.6 175792.2 175827.2 175370.8 176051 -222777.5 -216901.9 176723.6 175387.8 -219484.8 112121 -220127.3 176222.8 176579.7 -211277 176474 176476.9 -218397.1 0 
+175928.6 -217665.6 176073.9 175764.4 -217767.2 175473.8 -208655.2 175790.1 175827.1 175370.2 176051.1 -222781.9 -216899.9 176722.1 175387.8 -219465.7 111928.3 -220136.3 176223.3 176578.6 -211283 176475 176476.4 -218395.8 0 
+175928.3 -217669.5 176073.5 175765.4 -217773.4 175474.7 -208657.2 175788.6 175827.4 175370.6 176050.9 -222783.3 -216895.6 176721.7 175388.8 -219462.2 111719.3 -220142.6 176223.2 176577.3 -211304.6 176475.5 176477.6 -218392.2 0 
+175928.6 -217677 176073.8 175765.5 -217781.6 175477.1 -208661.2 175788.6 175827.1 175370.9 176050.9 -222783.2 -216896.4 176722.9 175389.6 -219480.1 111507.6 -220134.9 176223.1 176577 -211315.3 176475.9 176478.6 -218388.8 0 
+175929.7 -217683.4 176074.6 175764.9 -217777.5 175478.7 -208659.7 175788.3 175826.9 175370.4 176050.8 -222787.2 -216907.7 176723.5 175389.4 -219537.9 111302 -220130.5 176223 176577 -211320.7 176476 176478.5 -218391.4 0 
+175930.8 -217683.2 176075.7 175764.5 -217763.6 175478.2 -208667 175788.2 175827.6 175370.7 176050.5 -222793.7 -216921.9 176723.6 175388.1 -219624.2 111087.1 -220139.9 176222.5 176577 -211339.8 176475.1 176479 -218397.3 0 
+175930.6 -217679.9 176075.6 175764.6 -217757.5 175477 -208685.1 175789.6 175828.1 175371.6 176050.2 -222794.8 -216924.8 176723.6 175386.7 -219688.8 110865.4 -220151.7 176222.6 176577.9 -211364 176474.3 176479.7 -218400 0 
+175929.5 -217676.8 176074.8 175765.1 -217762.5 175476.7 -208653 175791.1 175828.3 175371.8 176050.7 -222791 -216917 176723.9 175386.5 -219701.4 110673.2 -220152.6 176223.7 176579.5 -211369.6 176475.1 176478.3 -218398.4 0 
+175929.1 -217673.6 176074.8 175765.3 -217765.5 175477.2 -208507.3 175791.5 175828 175371 176051.4 -222789 -216911.5 176724.4 175387.4 -219679.8 110520.4 -220139.8 176224.2 176580.1 -211348.5 176475.9 176475.9 -218397 0 
+175929.7 -217673.8 176074.4 175764.6 -217759.8 175477 -208294.9 175790.4 175827 175369.9 176051.4 -222789.3 -216913 176724.6 175388.4 -219655.4 110369.6 -220121.3 176223 176578.5 -211314.5 176474.8 176475.2 -218398.1 0 
+175930.9 -217679.5 176073.5 175764.1 -217753.8 175476.5 -208138.7 175788.1 175826.8 175369 176050.7 -222789.4 -216916.4 176724.3 175388.9 -219636.1 110174.4 -220117.8 176221.9 176576.1 -211288.7 176472.7 176475.5 -218398.7 0 
+175931.5 -217684.3 176074.3 175764.9 -217757 175476.2 -208094.2 175786.4 175828.3 175369.1 176049.9 -222789.7 -216916.9 176724.2 175389.3 -219621 109916.3 -220140.3 176222.7 176574.7 -211270 176472.4 176475.4 -218396.8 0 
+175931.3 -217683.6 176076 175765.8 -217762.5 175476.4 -208115.5 175787.3 175829.8 175370.1 176049.5 -222790 -216915.6 176724.4 175390.1 -219614.2 109613.5 -220164.8 176224.2 176575.3 -211230 176474.3 176475.4 -218393.5 0 
+175931.7 -217680.2 176076.6 175765.4 -217758 175476.7 -208147.5 175789.6 175829.8 175370.7 176049.5 -222789.7 -216917.3 176724.7 175390.3 -219600.3 109305 -220162.6 176224.4 176576.5 -211157.9 176476.3 176476.8 -218391.1 0 
+175931.3 -217677.6 176076.6 175764.5 -217749.2 175476.9 -208173.3 175791.5 175829.5 175371.1 176049.8 -222788.2 -216920.3 176724.6 175389.9 -219560.7 109031.1 -220143.9 176223.9 176576.8 -211086.7 176477.5 176479.1 -218392.1 0 
+175929.7 -217677.9 176076.6 175764.8 -217751.6 175477 -208191 175792.7 175829.9 175372.7 176050.8 -222786.3 -216917.6 176724.3 175390 -219518.4 108803.3 -220137.8 176224.3 176577.1 -211042.2 176477.3 176480 -218395.7 0 
+175930.1 -217681.5 176076.5 175766 -217765.6 175477.7 -208211.8 175792.7 175829.8 175374.2 176051.7 -222786.3 -216910 176724.4 175389.9 -219507.9 108586.9 -220149.7 176225.7 176578.5 -210988.5 176475.2 176479.2 -218398.5 0 
+175932.3 -217683.7 176076.3 175766.2 -217777.1 175478.2 -208260.7 175791 175828.3 175373.3 176051.3 -222787.5 -216904 176724 175389 -219520 108364.3 -220160.6 176226.4 176579.2 -210892.4 176472.8 176477.5 -218400.1 0 
+175932.5 -217678.7 176075.2 175765.5 -217779.1 175477.7 -208348.3 175789.3 175826.7 175371.1 176050.2 -222787.7 -216901.3 176722.8 175388.4 -219518.1 108187.5 -220156.5 176224.8 176577.5 -210800.6 176471.9 176475.6 -218401.8 0 
+175930.1 -217671.9 176073.5 175764.7 -217777.8 175476.7 -208456.5 175788.2 175826.5 175369.6 176049.3 -222788.1 -216896.1 176722.5 175389.3 -219490.9 108098.2 -220143.5 176222.3 176575.2 -210767.5 176472.2 176475 -218402.1 0 
+175928 -217671.3 176072.4 175764.5 -217777.5 175476.2 -208554.3 175786.6 175827.4 175369.8 176049.4 -222789.2 -216886.1 176723.1 175389.8 -219463.2 108051 -220134.1 176222.3 176575.3 -210776.3 176472.2 176476 -218402.5 0 
+175928.8 -217672.8 176073.1 175765.7 -217776.1 175476.5 -208620.5 175785.4 175828.3 175371 176050.4 -222789.3 -216880.5 176722.9 175388.3 -219453.3 107985.2 -220132.3 176224.2 176576.9 -210793.6 176472.3 176476.6 -218408.2 0 
+175931.5 -217670.9 176074.2 175766.3 -217776.2 175476.8 -208665.7 175787.4 175828.1 175371.5 176051.3 -222787.4 -216881.9 176722.7 175386.2 -219446.3 107905.5 -220134 176224.3 176577.9 -210817.5 176473.8 176476.2 -218415.2 0 
+175932.3 -217670.8 176074.3 175764.9 -217783.6 175476.9 -208725.4 175790.3 175827.3 175371 176051 -222785.2 -216884.9 176722.7 175386.3 -219429.2 107820 -220136.2 176222.5 176578.1 -210847.3 176475.3 176475.6 -218414.1 0 
+175930.6 -217674.3 176075.2 175764.4 -217793.3 175477.3 -208816 175791.1 175827.1 175370.7 176050.3 -222784.3 -216889.7 176722.9 175388.5 -219416.5 107702 -220133 176222.6 176578.8 -210870.5 176475.3 176475.7 -218404.9 0 
+175928.8 -217675.2 176077.2 175765.7 -217794.2 175477.9 -208919.2 175790.8 175827.2 175370.7 176049.7 -222784.6 -216894.6 176724.1 175390 -219425.5 107563.7 -220120.5 176224.6 176579.9 -210879.2 176475.3 176476.4 -218398.1 0 
+175928.4 -217675.4 176076.2 175765.4 -217788.3 175477.8 -209016.3 175789.8 175826.4 175370 176048.6 -222785.3 -216896.2 176724.8 175389 -219456.6 107454.8 -220118 176225.1 176579.3 -210874.3 176474.4 176476.1 -218398.5 0 
+175929.9 -217678.8 176073.1 175764.2 -217788.3 175476.9 -209108.4 175789.3 175825.9 175369 176047.9 -222785.4 -216899.6 176723.6 175387.7 -219487.7 107386.6 -220139.8 176223.8 176577.2 -210865.8 176473.3 176475.8 -218399.1 0 
+175932.1 -217678.9 176072.7 175764.6 -217796.1 175476.6 -209198.3 175790.2 175827.1 175369.4 176048.8 -222786.5 -216906.9 176722.4 175388.3 -219483.4 107325.3 -220153.6 176222.8 176576.1 -210868.9 176474.7 176477.1 -218395.9 0 
+175932.3 -217672.2 176073.6 175765.2 -217799.9 175476.5 -209278 175790.5 175827.8 175370 176049.8 -222789 -216909.9 176722.3 175389.7 -219446.4 107231.1 -220133.9 176222.3 176575.9 -210875.3 176476.4 176477.6 -218394.5 0 
+175930.8 -217665.4 176073.2 175765 -217793.7 175476.1 -209344.9 175789.2 175826.6 175369.2 176049.7 -222789.2 -216904 176722.6 175390 -219422.7 107101.6 -220107.8 176222 176575.5 -210846.8 176475.2 176476.3 -218397.9 0 
+175929.8 -217663 176073.6 175764.7 -217783.5 175476.2 -209412.8 175788.8 175825.8 175369.1 176049.5 -222787.3 -216895.4 176723.3 175389.3 -219430.8 106972.6 -220100.4 176222.6 176575.9 -210768.9 176473.6 176475.6 -218402.4 0 
+175929.8 -217659.3 176076.3 175764.3 -217776.8 175476.8 -209482.4 175790 175827.2 175370.9 176049.4 -222787.6 -216893.5 176724.6 175388.9 -219441.4 106860.7 -220103.5 176223.8 176576.6 -210690.1 176473.8 176476.3 -218403.3 0 
+175930.6 -217651.9 176077.7 175764 -217775.9 175477.3 -209527.6 175790.4 175829 175372 176049.5 -222791.1 -216899.1 176724.8 175388.7 -219428.7 106763.5 -220108.3 176224 176576.1 -210668.8 176474 176476.6 -218400.3 0 
+175931.3 -217646.5 176076.9 175764.3 -217777.7 175477.9 -209546.8 175789.3 175829.7 175371.2 176049.8 -222793.8 -216904.3 176723.5 175388.4 -219399.2 106683.7 -220112.7 176223.9 176576 -210703 176474.4 176477 -218396.3 0 
+175931 -217645.8 176076.7 175765.4 -217779.2 175478.1 -209579.3 175789.5 175830 175370.6 176050.3 -222794.3 -216905.6 176722.9 175388.7 -219372.3 106604.8 -220113.6 176225 176578.3 -210749.2 176476 176477.7 -218394.8 0 
+175930.1 -217647.8 176077.2 175766.9 -217781.7 175477.7 -209637.6 175791.5 175829.9 175370.9 176050.8 -222795.7 -216902.9 176723.8 175389.3 -219349.4 106506 -220110.8 176226 176580.1 -210783.4 176477.2 176477.7 -218396 0 
+175929.5 -217649.9 176076.9 175767.7 -217784.8 175477.3 -209691.3 175792.7 175828.8 175371.6 176050.7 -222797.7 -216894.2 176724.3 175388.8 -219326.9 106391.2 -220108.4 176225 176579.4 -210811.9 176476.8 176477.7 -218396.2 0 
+175929.3 -217651.5 176076.3 175767.1 -217785.3 175477.4 -209725.9 175791.5 175827.3 175371.7 176049.7 -222798.3 -216887.8 176724.5 175388.3 -219322.2 106264.4 -220111.2 176223 176577.4 -210840 176475 176478.2 -218394.1 0 
+175929.5 -217652.9 176075.6 175765.9 -217782.9 175477.4 -209751 175788.9 175826.8 175370.8 176049.4 -222799.2 -216892.7 176725.1 175388.4 -219347.5 106114.9 -220116.3 176221.9 176575.9 -210862.1 176472.1 176477.2 -218394.6 0 
+175929.9 -217653.9 176075.2 175765.2 -217779.5 175477.3 -209771.8 175788 175827.6 175370.5 176050.3 -222799.6 -216897.9 176724.7 175388.2 -219384.1 105935.6 -220113.3 176222.6 176576.8 -210873.5 176471.5 176476 -218397.9 0 
+175929.9 -217653.7 176074.7 175764.6 -217776.1 175477 -209798.6 175787.9 175828.8 175371.2 176050.3 -222798.2 -216895.9 176722.9 175388 -219411.9 105747.9 -220102.5 176223.8 176577.8 -210882 176473.9 176475.9 -218399 0 
+175929.5 -217653.9 176072.9 175763.4 -217773 175476.5 -209848 175785.2 175828.4 175371 176048.4 -222797.1 -216898.5 176720.9 175388.4 -219433.3 105602.2 -220093.8 176223.1 176575.5 -210902.7 176474.4 176474.5 -218397.2 0 
+175929 -217655.5 176071.4 175762.7 -217772.1 175475.8 -209905 175782.8 175826 175369.6 176046.8 -222794.7 -216906.6 176719.7 175388.5 -219450.3 105523.3 -220093.9 176221.4 176572.5 -210935.9 176472.5 176472.9 -218396.7 0 
+175929.1 -217654.7 176071.9 175763.2 -217772.5 175475.3 -209932.6 175784.7 175824.2 175369.3 176047.7 -222790.5 -216908.2 176720.3 175387.8 -219453.2 105475.6 -220109.4 176221.4 176572.9 -210953.6 176471.9 176474.4 -218398.6 0 
+175929.9 -217649.2 176072.8 175763.8 -217771.2 175475.4 -209935.6 175788 175825.2 175370.4 176049.7 -222791.3 -216907.1 176721.7 175386.5 -219449.3 105409.2 -220137.5 176222.3 176574.7 -210935.1 176472.2 176477.1 -218401.8 0 
+175930.4 -217644.3 176073.7 175764.5 -217769.9 175476.6 -209943.4 175789 175827.7 175370.7 176050.6 -222797.5 -216911.6 176722.5 175386 -219456.6 105300.6 -220156.5 176222.8 176575.1 -210896.7 176472.6 176477.6 -218402.4 0 
+175930.6 -217643.6 176074.8 175765.3 -217771.3 175477.4 -209946.7 175789.3 175829 175370.1 176050.7 -222801.8 -216917.4 176722.4 175386.4 -219467.8 105161.5 -220154.8 176223 176575.5 -210860.6 176474 176476.8 -218397.5 0 
+175930.4 -217644.4 176074.6 175764.6 -217774.6 175477.1 -209939 175789.6 175828.5 175370.4 176050.1 -222801.6 -216918.3 176722.2 175386.9 -219469.6 105042.3 -220144.8 176222.3 176576.4 -210832.2 176474.6 176476.8 -218392.4 0 
+175928.8 -217645.9 176073.8 175763.3 -217776.9 175476.9 -209952.4 175789.3 175827.2 175371 176049 -222797.9 -216914.1 176722.1 175386.6 -219481.9 104975.2 -220137.4 176220.9 176577.1 -210815.3 176473.7 176477.7 -218392.3 0 
+175927.7 -217649.8 176074.1 175763.8 -217775.5 175476.3 -209989.2 175789.2 175826.5 175370.3 176049.3 -222794.2 -216909.9 176722.5 175386.6 -219522.7 104932.6 -220134.7 176221 176577 -210814.3 176473.6 176477.9 -218394.3 0 
+175928.9 -217653.9 176074.6 175765.3 -217771.8 175475 -210015.6 175789.3 175827.1 175369.3 176051.5 -222794.6 -216910.6 176723.1 175387.8 -219568.2 104907 -220134.3 176222 176576.3 -210831.6 176474.5 176477.5 -218396.8 0 
+175930.2 -217654.1 176074.7 175765.7 -217769.9 175475.4 -210027.3 175789.4 175828.3 175369.2 176052.9 -222795.7 -216909.8 176723.2 175389.1 -219585.4 104927.9 -220128.4 176222.3 176576.3 -210866.8 176474.9 176477.9 -218399.9 0 
+175929.6 -217652 176075.3 175765.5 -217771 175477.3 -210048.6 175790.4 175828.6 175369 176052.1 -222793.4 -216903 176723 175389.6 -219573.7 104964.7 -220114.6 176222.6 176578 -210902.4 176474.7 176478.3 -218401.1 0 
+175929.2 -217652 176075.8 175765.6 -217775.6 175477.6 -210085.4 175790.8 175827.9 175368.7 176051 -222792.2 -216900.7 176723.5 175390.3 -219559.7 104949.8 -220101 176222.7 176579.2 -210918.1 176473.6 176477.6 -218398.9 0 
+175930 -217653.8 176075.6 175764.5 -217783.4 175476.6 -210112 175788.9 175826.6 175369.3 176050.5 -222794 -216907.8 176723.2 175389.8 -219567.2 104879.3 -220098.8 176221.7 176577.1 -210923.7 176471.8 176477.3 -218397.7 0 
+175929.5 -217655.7 176074.8 175762.4 -217790.1 175476.3 -210117.5 175787.6 175825.1 175369.9 176049.9 -222794 -216914.3 176721.1 175387.3 -219590.2 104794.6 -220112.3 176221.1 176575.3 -210928.4 176471.1 176477.5 -218401.9 0 
+175927.2 -217657.1 176074.7 175762.3 -217791.7 175476.2 -210138.7 175788.9 175824.8 175369.6 176049.3 -222791.7 -216914.2 176719.8 175385 -219600.5 104731.4 -220129.2 176222.4 176577.2 -210901.6 176472.7 176477.1 -218405.9 0 
+175926.2 -217656 176075.3 175763.9 -217788.5 175476 -210208.2 175789.3 175826.6 175369.3 176048.8 -222792.2 -216912.4 176720.6 175384.9 -219597.9 104713.6 -220133.6 176223.9 176578.5 -210826 176473.9 176476.2 -218402.9 0 
+175927.4 -217653.1 176074.5 175764.1 -217787 175475.9 -210302.8 175787.3 175828.3 175369.5 176048.3 -222796.3 -216913.7 176720.5 175386.1 -219605.4 104734.3 -220123.9 176223 176576.2 -210738.5 176472.6 176475.5 -218397.3 0 
+175928.5 -217650.9 176073.2 175763.3 -217790.1 175476 -210385.1 175787.5 175827.5 175369.4 176048.9 -222798.1 -216915.8 176719.7 175386.6 -219615.3 104760.7 -220108.3 176221.5 176575.5 -210681.1 176472 176476.2 -218394.7 0 
+175929.7 -217649.7 176074.1 175763.9 -217789.6 175476.4 -210456.3 175790.8 175825.8 175369.1 176050.2 -222794.6 -216913.8 176721.2 175386.3 -219605.2 104773.2 -220100.2 176222.2 176578.2 -210667.6 176474.2 176477.6 -218392.8 0 
+175930.6 -217650.6 176074.6 175765.6 -217781.4 175476.6 -210527.9 175790.7 175826.3 175368.8 176050 -222790.7 -216906.2 176723 175386.6 -219588.7 104766.5 -220109.9 176223.7 176578.5 -210687.9 176475.4 176477.1 -218390.2 0 
+175929.8 -217652.3 176072.8 175766.3 -217775.9 175476.1 -210592.5 175786.7 175827.8 175368.3 176048.1 -222790.4 -216897.2 176721.8 175387.8 -219595.4 104740.3 -220123.1 176223.6 176576 -210710.5 176472.9 176475.2 -218390.2 0 
+175928.9 -217651.4 176072.5 175765.3 -217780.8 175475.7 -210658.9 175785.9 175828.1 175368.1 176047.6 -222790.6 -216895.5 176719.9 175388.5 -219614.2 104714.3 -220119.5 176223 176576 -210711 176470.8 176475.3 -218394.8 0 
+175929.6 -217648.6 176075.8 175764 -217788.5 175475.9 -210753.1 175789.6 175827.9 175369.5 176049 -222787.4 -216903.4 176720.9 175388.6 -219606.6 104698.9 -220109.2 176222.8 176579.1 -210702.5 176472.2 176477.2 -218400.7 0 
+175930.2 -217646.9 176078 175764.1 -217790 175476.3 -210871.8 175791.3 175828 175371.9 176049.7 -222783.1 -216912.6 176723 175389.3 -219583.8 104655.5 -220109.3 176221.9 176579.8 -210711.3 176474.3 176478.1 -218402 0 
+175929.8 -217646.8 176076.2 175764.6 -217786.9 175476.3 -210984 175789.3 175827.8 175372.1 176049.3 -222780.5 -216912.4 176723 175389.8 -219595.6 104555.6 -220109.8 176220.9 176577.6 -210734.1 176474.3 176476.6 -218396.9 0 
+175929.8 -217647.2 176074 175764.8 -217783.1 175476.6 -211061.4 175788.3 175827 175369.9 176049.6 -222779.5 -216897.6 176722.3 175389.4 -219633.6 104442.2 -220094.5 176221.2 176577.3 -210738.8 176474.1 176475.4 -218387.9 0 
+175929.8 -217647.8 176074.3 175765 -217780.1 175477.2 -211089.1 175788.6 175826.3 175368.7 176050.7 -222783.5 -216879.4 176722 175389.2 -219645.7 104367 -220072.7 176221.7 176579 -210713.4 176474.2 176476 -218383.8 0 
+175929.3 -217645.9 176075.2 175765.4 -217780.3 175476.8 -211082.9 175787.8 175826.3 175369.1 176050.4 -222793 -216880 176721.9 175388.7 -219627.4 104315.3 -220061.9 176221 176578.1 -210688.4 176472.9 176476.4 -218390 0 
+175929.2 -217639.7 176075.1 175765.2 -217782.8 175476.3 -211077.3 175787.7 175826.6 175368.9 176049 -222796.7 -216894 176722.1 175387.3 -219606.4 104233.3 -220056.8 176220.9 176575.8 -210699.6 176472.6 176475.6 -218396.5 0 
+175929.5 -217634.4 176074.5 175764 -217783.3 175476.3 -211081.7 175789.8 175826.7 175368.1 176049.2 -222790.3 -216895.4 176722.6 175386.5 -219581.8 104114.9 -220044.8 176221.8 176575.8 -210741.7 176474.9 176475.4 -218395.3 0 
+175929.4 -217637.3 176073.9 175763 -217781.8 175475.8 -211083.8 175790.5 175826.7 175367.8 176049.9 -222786.7 -216887.4 176722.4 175386.8 -219543.1 104041.5 -220037.3 176221.5 176576.7 -210778.2 176475.4 176475.8 -218394.3 0 
+175928.9 -217644.6 176073.8 175763.3 -217781.1 175475.1 -211082.7 175788.1 175826.8 175368.1 176048.7 -222792.9 -216886.7 176721.3 175387.3 -219503.1 104067.1 -220048.7 176219.7 176575.6 -210793.1 176472 176476.2 -218397.5 0 
+175928.5 -217647.1 176074.8 175764 -217781.2 175476 -211085 175785.7 175826.9 175368.8 176047.6 -222797.6 -216889.3 176721.5 175387.8 -219472.4 104108.4 -220062.6 176219.2 176574.8 -210802.6 176470.5 176477 -218399.3 0 
+175928.8 -217643.3 176076.1 175763.8 -217782.2 175477.6 -211090.2 175786.2 175827.5 175369.8 176049.1 -222792.9 -216892 176722.4 175388 -219439.3 104088 -220064.8 176220.8 176576.2 -210820.9 176473.1 176478.3 -218397.3 0 
+175930.2 -217639 176075.6 175763.8 -217784.9 175478.1 -211089.4 175787.5 175828.7 175370.8 176050.7 -222788.4 -216898.2 176721.5 175387.5 -219404.2 104061.5 -220072.7 176222.3 176577.2 -210840.7 176474.8 176478.3 -218395.4 0 
+175931.4 -217636.4 176074 175764.2 -217788 175478 -211068.9 175787.9 175829.1 175371.2 176050.2 -222789.5 -216898.7 176720.1 175386.7 -219387.9 104079.1 -220094.4 176223.1 176577.3 -210844.5 176474.8 176477 -218395.7 0 
+175930.4 -217633.6 176074.3 175764 -217790.8 175477.9 -211030.5 175788.7 175828 175370.9 176049.2 -222786.5 -216891.7 176721.7 175387 -219395.7 104094.7 -220114.5 176223.6 176578.2 -210840.6 176476.3 176476.9 -218397.4 0 
+175928 -217633.5 176075.2 175763.3 -217793.1 175476.3 -210997.8 175788.7 175826.3 175370.2 176048.9 -222781.4 -216891.8 176724.1 175387.7 -219411.7 104060.1 -220124.6 176222.8 176577.9 -210863.4 176476.2 176477.5 -218401.3 0 
+175926.9 -217638.5 176073.8 175762.6 -217793.2 175474.4 -210983 175786.6 175825.6 175368.9 176048.1 -222786 -216903.3 176723.5 175387.4 -219420.3 103970 -220123.1 176221 176575.3 -210912.2 176473.4 176476.8 -218404.1 0 
+175927.8 -217642.2 176072.1 175761.6 -217790.5 175474.7 -210983.1 175785.5 175826.1 175368.2 176047.1 -222793 -216913.2 176722 175386.3 -219419.6 103841.9 -220105.5 176220.4 176573.8 -210941.4 176472.7 176475.7 -218399.2 0 
+175929.4 -217640.8 176072.5 175761.4 -217786.6 175475.8 -211000.7 175786.5 175825.8 175368.2 176047.3 -222790.4 -216916.9 176722 175386 -219422.6 103708.1 -220079.1 176221 176574.8 -210936.7 176473.7 176475.3 -218393.1 0 
+175930.5 -217637.7 176073.2 175763 -217785.9 175475.6 -211034 175787 175824.3 175368.2 176048.9 -222785.1 -216920 176722 175387.2 -219438.9 103584.4 -220063.1 176221.7 176575.3 -210927.3 176473 176474.5 -218395.9 0 
+175931.2 -217634.4 176073.7 175764.3 -217792.1 175475.9 -211065.3 175787.4 175824.2 175368.7 176049.9 -222784.3 -216922.7 176721.4 175388.9 -219458.2 103453.4 -220069 176222.6 176575.2 -210923.5 176472.6 176473.6 -218400.2 0 
+175931.3 -217637.9 176074 175763.4 -217797.2 175477 -211078.6 175787.1 175825.8 175369.5 176048.6 -222784.8 -216922.3 176721.4 175389.3 -219470.9 103329.3 -220085.9 176222.7 176575.3 -210908.7 176472.8 176473.1 -218398.2 0 
+175930.4 -217653.6 176073.8 175761.9 -217791.8 175476.8 -211069.7 175786.3 175826.4 175369.2 176047.3 -222782.6 -216918.3 176721.8 175387.8 -219485.1 103269.3 -220091.5 176221.6 176575.6 -210880.3 176471.5 176472.8 -218394.9 0 
+175929.3 -217667 176073.9 175762.5 -217783.5 175476.1 -211045.7 175788.2 175825.6 175369.1 176048.8 -222780.3 -216909.8 176721.7 175386.7 -219510.2 103270.5 -220090.8 176221 176576.3 -210851.4 176470.6 176474.2 -218393.8 0 
+175929.3 -217665.1 176074.9 175764.8 -217784.4 175476.5 -211016.3 175791.4 175826.2 175370.1 176050.9 -222782.9 -216899.5 176721.9 175387 -219544.2 103255.7 -220109.3 176221.4 176576.5 -210827.7 176472.2 176477 -218394.6 0 
+175929.7 -217658.2 176075.1 175765.4 -217792.6 175476.8 -210971.3 175790.9 175827 175369.6 176049.7 -222787.6 -216893.2 176722.5 175387.1 -219580.1 103192.4 -220138.2 176221.6 176576 -210803 176473.7 176477.8 -218396.7 0 
+175929.5 -217660.3 176072.7 175762.7 -217799.1 175475.3 -210893.5 175786.8 175825.3 175367.1 176046.6 -222788.1 -216893.4 176721.8 175386 -219605.7 103115.9 -220146.4 176220.9 176575 -210779.1 176472.9 176475.3 -218397.1 0 
+175929.1 -217667.1 176070.4 175760 -217799.8 175473.6 -210798.8 175784.7 175824.2 175365.2 176045.7 -222786.1 -216898.4 176720.5 175385.3 -219607.1 103049.2 -220138.7 176220.1 176574.1 -210779.1 176472.1 176473.5 -218393.7 100 
+175928.8 -217669.6 176071.5 175760.7 -217796.5 175474.1 -210720.7 175786.4 175826.1 175365.8 176047.4 -222784.9 -216904.4 176720.5 175385.7 -219591.6 103003.6 -220132.4 176220 176574.7 -210813.9 176472.7 176475.3 -218390.4 0 
+175928.1 -217671.6 176072.7 175763 -217793.8 175474.4 -210657.8 175787.2 175826.8 175367.4 176048.6 -222785.4 -216906.6 176720.7 175385.9 -219584.9 103004.4 -220123.2 176219.8 176575.1 -210843.8 176472.2 176476.4 -218391.4 0 
+175927.9 -217674.4 176071.5 175763.1 -217793.1 175472.9 -210575.5 175785.3 175824.2 175367.4 176047.5 -222786.7 -216902 176719.7 175385.8 -219596.2 103044.6 -220111 176219.3 176574.1 -210824.8 176471.2 176474.5 -218393.1 0 
+175928.9 -217671.2 176071.3 175761.7 -217794.8 175473.3 -210474.3 175785.1 175822.9 175366.6 176045.9 -222786.8 -216893.4 176720 175386.6 -219612.5 103070 -220105.9 176219.7 176574.6 -210773.1 176472.8 176473.8 -218390 0 
+175930.1 -217665.4 176073.2 175761 -217798.7 175475.9 -210408.2 175786.7 175824.7 175367.2 176046.3 -222789.1 -216887 176721.2 175387.4 -219624.5 103053.2 -220109.2 176220.5 176576 -210740.1 176473.9 176475.4 -218387.1 0 
+175930.4 -217664.6 176074.4 175761 -217802.1 175477 -210389.8 175786.5 175825.7 175368.5 176048 -222794.8 -216885.7 176720.8 175387.3 -219627.8 103020.1 -220117.6 176220.8 176575.5 -210749.3 176472.2 176476.2 -218390.4 0 
+175928.7 -217665.7 176073.7 175761.1 -217801.5 175476.1 -210376.3 175785.1 175824.9 175368.8 176048.3 -222796.9 -216889.9 176719.7 175387.1 -219615.6 102988.4 -220126.9 176221 176574.2 -210789.5 176470.1 176475.4 -218396.3 0 
+175925.5 -217667 176072.9 175761.4 -217797.1 175475.3 -210375.5 175784.8 175824.4 175368.3 176047.2 -222793.1 -216895.2 176719.4 175386.4 -219595.8 102964.7 -220131.7 176221.3 176574.4 -210835.1 176469.3 176474.2 -218398.6 0 
+175924.3 -217668.2 176074.2 175761.5 -217790.2 175475.8 -210424.6 175786.4 175825.4 175368.2 176047.2 -222787.8 -216893.8 176719.9 175385.7 -219587.2 102972.7 -220128.9 176221.8 176576.6 -210861.1 176471 176474 -218397 0 
+175926.8 -217663.3 176076 175761.8 -217783.3 175476.3 -210493.9 175787.4 175826.3 175367.9 176048.3 -222784.6 -216886.5 176720.5 175386.5 -219589.6 102994.4 -220124 176221.3 176577.7 -210865.5 176473.5 176474.1 -218395 0 
+175928.7 -217652.1 176074.7 175762.6 -217782.3 175474.6 -210540 175785.6 175826.1 175367.1 176048 -222784 -216882.1 176720.3 175388.3 -219589.5 102961.8 -220127.6 176219.5 176575.3 -210867 176473 176473.3 -218393.8 0 
+175927.7 -217641.7 176072.1 175763.3 -217788.4 175472.8 -210577.2 175784.2 175824.5 175367.2 176047 -222782.8 -216883.8 176719.6 175388.8 -219579.1 102852.2 -220136.5 176218.1 176573.1 -210874.4 176471 176472.9 -218389.3 0 
+175927.5 -217637.3 176071.9 175763.2 -217794.6 175473.4 -210625.2 175785.5 175823.4 175367.6 176047.6 -222779.7 -216889.9 176718.9 175387.5 -219551.1 102711.7 -220136.7 176218.6 176574.1 -210885.3 176470.6 176473.5 -218383.4 0 
+175929.7 -217639.8 176073.6 175763.2 -217796.5 175474.2 -210670.4 175787.5 175824.6 175367.5 176048.5 -222777.2 -216895.8 176719.3 175385.9 -219507.6 102592 -220127.9 176220.3 176575.2 -210916.8 176471.5 176474 -218382.2 0 
+175930.7 -217643.5 176075.6 175764.3 -217796.3 175474 -210704.7 175789.4 175826.7 175368.6 176049.1 -222776.5 -216894.7 176721.3 175385.4 -219473.7 102498.4 -220118.5 176221.8 176575.2 -210988.2 176472.7 176475.2 -218384.8 0 
+175928.7 -217641.6 176077.1 175765.5 -217795.7 175474.8 -210733.9 175790.9 175826.7 175370.4 176049.6 -222778.6 -216889.3 176723.2 175386.1 -219469.8 102380.5 -220113.4 176222.4 176576.1 -211072.5 176473.8 176476.6 -218386 0 
+175926.5 -217639.8 176076.6 175764.8 -217793.3 175475.7 -210749.4 175790.4 175825.1 175369.8 176049.1 -222782.4 -216888.2 176723 175386.5 -219483.9 102190.8 -220118 176222.4 176576.7 -211115.4 176473.9 176476.4 -218385.4 0 
+175926.4 -217645.2 176074.4 175763.1 -217787.9 175475.3 -210750.2 175788.7 175824.3 175367.2 176047.6 -222783.1 -216886.8 176721.5 175385.8 -219493.3 101956.3 -220132.2 176221.8 176575.7 -211107.3 176473.6 176476 -218385.1 0 
+175927.6 -217653.9 176072 175762.1 -217782.6 175474.4 -210772.2 175787.7 175825.1 175366.2 176047.1 -222783.3 -216877.4 176720.6 175384.5 -219495.6 101755.9 -220144.8 176220.9 176574.5 -211091.2 176472.6 176476.5 -218388 0 
+175929.1 -217658.5 176070.3 175761.8 -217781.5 175473.8 -210837.6 175786.9 175825.8 175367.4 176047.6 -222788.4 -216869.2 176720.2 175384.2 -219495.6 101623.2 -220146.3 176220.6 176574.2 -211084.2 176470.5 176476 -218393.8 0 
+175930 -217655.7 176070.9 175761.9 -217784 175474.3 -210913.5 175787.7 175825.4 175368.2 176048.4 -222793.7 -216874.2 176720.6 175384.9 -219483.3 101516.2 -220136.8 176221.3 176575.5 -211054.6 176470.9 176475.1 -218395.7 0 
+175929.2 -217649.1 176074.4 175762.4 -217787 175476.2 -210959 175789.5 175825.5 175367.8 176049.9 -222794.2 -216889.8 176721.7 175385.9 -219454.8 101396.5 -220127.3 176222.5 176578.2 -211004.4 176474.1 176476.4 -218393 0 
+175927.7 -217646.6 176076 175762.9 -217789.8 175477.6 -210974.8 175788.3 175826.5 175367.4 176051.2 -222792.7 -216904.4 176721.6 175386.9 -219425.8 101285.6 -220126.7 176222.7 176578.4 -210982.5 176474.1 176477.4 -218394.1 0 
+175927.3 -217645.6 176073.4 175763 -217791.4 175475.9 -210991.7 175785.6 175826.5 175367.9 176049.9 -222788.3 -216910.2 176720.1 175387.2 -219411.7 101218.9 -220130.2 176221.1 176575.3 -211001.2 176470.9 176475.2 -218395.7 0 
+175928.1 -217640.3 176071.4 175762.2 -217791.3 175472.5 -211030.4 175786.6 175825.5 175368.4 176047.5 -222781.6 -216908.6 176720 175386.5 -219422.3 101188.9 -220133.4 176220.2 176573.3 -211030.6 176470.1 176472.9 -218390.3 0 
+175927.8 -217640.8 176072.6 175760.9 -217790.6 175471.4 -211088.5 175788.8 175825 175368.2 176046.9 -222779.7 -216905.9 176721.9 175385.4 -219459.6 101157.5 -220136.9 176221.1 176573.7 -211033.9 176472.3 176473.5 -218385.5 0 
+175926.3 -217655 176073 175760.4 -217790.4 175473.2 -211154 175787.8 175825 175367.1 176047.1 -222783.2 -216906.1 176722.2 175384.9 -219513.5 101102.2 -220136.9 176222.3 176573.8 -210979.5 176473.3 176474.8 -218387.9 0 
+175925.9 -217669.4 176071.7 175761.3 -217789.9 175474.8 -211207.1 175785.9 175825.3 175366.5 176046.9 -222786.6 -216905.3 176719.8 175385.3 -219575.6 101023.1 -220130.7 176222.3 176573.3 -210894.8 176471.7 176475.1 -218391.1 0 
+175928.2 -217671.2 176072.1 175762.4 -217788.4 175475.3 -211230.3 175785.9 175825.6 175366.9 176046.5 -222788.7 -216898.5 176717.8 175386.5 -219634.5 100914.1 -220123.4 176220.6 176573.6 -210857.5 176470.2 176474.7 -218391.5 0 
+175930.2 -217666.7 176073.4 175762.8 -217786.1 175475.8 -211239.6 175785.8 175824.5 175366.7 176045.8 -222790.6 -216892 176717.9 175387 -219673.9 100767.6 -220124.1 176219 176574.1 -210888.4 176469.5 176473.4 -218393.5 100 
+175928.4 -217664.2 176073.7 175762.3 -217784.3 175476.3 -211261.3 175787.2 175822.7 175366.2 176046.1 -222788.4 -216891.2 176719.7 175385.8 -219690.4 100596.8 -220132.6 176218.8 176575.1 -210933.7 176470.3 176472.9 -218396 0 
+175924.9 -217664.2 176074.2 175762 -217785.2 175476 -211282.9 175790.7 175823.1 175367.1 176047.9 -222781.2 -216890.2 176722.5 175384.7 -219684.8 100422.7 -220134.9 176219.5 176577 -210951.2 176472.6 176474.2 -218395.3 0 
+175925.3 -217667.2 176073.7 175762.5 -217789.9 175474.7 -211273.6 175790 175825 175367.7 176048.3 -222778.3 -216891.1 176723.3 175385.1 -219656.6 100272.7 -220124.3 176219.6 176576.5 -210948.2 176472.8 176474.4 -218393.9 0 
+175929 -217670.5 176071.2 175762.8 -217794.5 175473.7 -211234.5 175785.2 175825.7 175366.6 176047 -222783.3 -216901.2 176721.1 175385.7 -219621.8 100189.3 -220114.7 176219.3 176574.1 -210944 176470.8 176473.1 -218392.6 0 
+175931.1 -217668.1 176070.5 175762.3 -217794.8 175474.4 -211197.6 175783.1 175824.7 175366.1 176046.4 -222785.8 -216911.1 176719.8 175386.3 -219599.4 100167.7 -220120.4 176219.2 176574.1 -210945.4 176471.6 176473.3 -218389.7 0 
+175930.1 -217664.3 176072.4 175761.6 -217791.4 175475.4 -211187.8 175784.1 175824 175367.3 176046.4 -222783.3 -216910.2 176720.9 175386.9 -219590.8 100137.8 -220139.6 176219.3 176574.2 -210959.4 176473.8 176474.5 -218390.1 0 
+175927.6 -217666.1 176072.8 175761.7 -217786.1 175475 -211190.9 175785.2 175824.5 175368 176045.5 -222782.5 -216903.8 176721.9 175386.8 -219590.8 100073.1 -220156.2 176220.1 176572.4 -210986.1 176473 176474.7 -218393.6 0 
+175926 -217667.7 176072 175762.9 -217779.8 175474.6 -211183.1 175786.5 175825.3 175368.1 176045.4 -222782 -216901.8 176721.5 175386.3 -219582.4 100013.7 -220155.3 176221.4 176572.8 -210994.2 176471.3 176474.9 -218392.6 0 
+175926.7 -217664.9 176072.7 175763.5 -217777.8 175475.3 -211176.7 175787.1 175825.7 175368.5 176047.3 -222779 -216905.6 176720.9 175386 -219539.7 99977.3 -220140.2 176222.1 176575.3 -210958 176472 176475.6 -218389.3 0 
+175927.7 -217666.1 176073.2 175762 -217785.3 175475.3 -211186.3 175785.6 175825.5 175368 176048.3 -222779.3 -216908.8 176719.7 175385.8 -219472.9 99942.57 -220127.6 176221.9 176575 -210904 176472.4 176475.6 -218390.5 0 
+175927.4 -217673.2 176071.8 175760.1 -217794.5 175474.3 -211189.3 175783.8 175824.7 175366.5 176046.6 -222783.3 -216906.5 176718.2 175386.6 -219421.6 99872.88 -220123.7 176221 176572.3 -210885.3 176470.6 176475.2 -218394 0 
+175927.6 -217675.3 176071.4 175760.5 -217794.8 175474 -211173.9 175784.6 175824.3 175366.4 176045.2 -222784.1 -216898.1 176718.1 175388.1 -219387.2 99745.95 -220116.9 176220.5 176572.9 -210919.1 176470.3 176475.4 -218393.6 0 
+175928.4 -217668.9 176073.3 175762.4 -217790 175474.9 -211167.9 175787.4 175825.3 175368.5 176046.7 -222783 -216889.1 176719.2 175387.9 -219347.4 99588.84 -220101.5 176221.5 176575.9 -210972.9 176472.5 176475.6 -218391 0 
+175928.6 -217661.2 176074.3 175763.3 -217788.8 175474.5 -211195.5 175787.5 175826.9 175369.6 176048 -222785.2 -216888.5 176718.8 175385.3 -219310.2 99428.38 -220088.8 176221.3 176575.8 -211015.9 176472 176475 -218391.9 0 
+175928.5 -217655.7 176073.5 175762.9 -217792.3 175473.5 -211239.2 175785.4 175826.9 175368.6 176046.8 -222787.3 -216893.5 176718.2 175383.6 -219286.1 99244.26 -220080.9 176219.6 176573.5 -211054 176469.5 176474.8 -218394 0 
+175928.3 -217650.8 176073.2 175762.5 -217799 175474.9 -211248.5 175785.9 175825.1 175367.7 176046 -222785.6 -216891.4 176720.1 175384.9 -219261.4 99028.59 -220070.2 176219.8 176574.3 -211097.9 176470.6 176475.4 -218393.1 0 
+175927.5 -217647.1 176073 175761.6 -217805.7 175476.6 -211198.9 175787.9 175823.4 175367.6 176047.3 -222783.4 -216884.2 176722.3 175386.8 -219232.4 98824.77 -220066 176221.2 176576.4 -211131.6 176473.1 176475.5 -218392.5 0 
+175926.5 -217646.3 176071.2 175760.6 -217807.1 175475.8 -211137 175787.2 175823.2 175367.1 176047.5 -222784.1 -216885 176721.8 175386.9 -219208.8 98674.22 -220077.5 176220.7 176576.1 -211138.6 176472.5 176474.4 -218394.5 0 
+175926.1 -217645.2 176070.4 175761.4 -217804.1 175473.8 -211124.2 175785.4 175824.3 175366.5 176046.3 -222785.3 -216890.8 176720.3 175386.7 -219185.5 98561.77 -220089 176219.2 176575.4 -211133.7 176470.9 176473.6 -218395.1 0 
+175927.2 -217639 176072.2 175763.3 -217800.8 175472.7 -211162.4 175785.8 175824.7 175366.7 176047.4 -222784.4 -216888.9 176719.3 175386.9 -219157.7 98435.34 -220087.6 176219.1 176576.4 -211146.4 176471.4 176474 -218390.8 0 
+175928.6 -217633.3 176073.2 175763.6 -217797.9 175473.1 -211212.8 175787.3 175823.7 175367.2 176050.4 -222782.3 -216885.3 176718.7 175386.2 -219143.3 98264.34 -220083.3 176219.8 176576.9 -211188.5 176472.5 176475.1 -218384.6 0 
+175928.1 -217635.6 176071.8 175762 -217795.8 175473.7 -211254.7 175786.9 175823.5 175367.2 176050.1 -222782.5 -216894.3 176718.9 175385.2 -219159.3 98064.2 -220089.3 176220.2 176575.3 -211248.1 176471.9 176475 -218381.8 0 
+175926.7 -217642.4 176070.5 175760.2 -217795.7 175472.6 -211296.3 175784.8 175824.8 175366.6 176046.6 -222786 -216908.6 176720 175385.2 -219186.5 97870.14 -220103.6 176220.9 176572.9 -211302.1 176470.1 176472.6 -218383 0 
+175927.2 -217647.4 176070.8 175759.4 -217795.9 175471.8 -211338.3 175783.7 175825 175365.5 176044 -222789.3 -216910 176719.7 175385.3 -219193.4 97683.73 -220110.2 176220.9 176571.6 -211323.3 176468.8 176470.9 -218385.7 0 
+175928.6 -217648.3 176071.8 175760.3 -217796.4 175473.1 -211350.6 175785 175823.5 175365.1 176044.5 -222788.8 -216900.1 176719 175384.8 -219187.8 97477 -220104.2 176219.9 176573.1 -211293.8 176469.6 176472.2 -218388.1 0 
+175928.9 -217646 176072.3 175761.9 -217799.8 175475 -211307.8 175786.4 175823.4 175366.4 176045.5 -222787 -216896.5 176719.5 175385 -219197.7 97268.08 -220102.7 176219.5 176575.7 -211231 176471.1 176474 -218391 0 
+175928.7 -217643.4 176072.1 175763 -217802.3 175475.9 -211220.8 175786.5 175825.2 175367.7 176045.2 -222787.2 -216904.4 176719.5 175385.6 -219216.7 97111.12 -220111.7 176219.7 176576.1 -211179 176471.1 176474 -218394.7 0 
+175929 -217642.8 176072.6 175762.6 -217798.9 175476.9 -211119.6 175786.7 175825.4 175367 176044.9 -222785.1 -216912.6 176718.5 175385.8 -219221 97012.27 -220115.5 176219.9 176576 -211158.2 176472.1 176474.1 -218396 0 
+175928.4 -217643.6 176073.7 175761.8 -217795.8 175477 -211033.9 175786.3 175823.5 175366.2 176045.3 -222781.7 -216912.2 176718.3 175386 -219216.3 96940.88 -220114 176220.8 176576.4 -211158.5 176473.7 176475 -218393.8 0 
+175926.4 -217644.7 176072.5 175761.8 -217797.8 175475 -210973.9 175784.3 175822.8 175366.8 176044.5 -222783.5 -216903.3 176718.5 175385.9 -219221.3 96869.21 -220119.2 176221.6 176574.8 -211183.7 176472.2 176474.3 -218392.5 0 
+175925.2 -217646.9 176070.3 175762.5 -217796.5 175473.2 -210901.8 175783.6 175823.5 175367.6 176043.6 -222785.7 -216891.3 176719.1 175385.2 -219236.2 96760.98 -220119.7 176221.4 176572.6 -211242.1 176469.8 176472.5 -218392.3 0 
+175926.1 -217653.6 176070.8 175763.3 -217792 175473.8 -210776.9 175785.8 175823.7 175367.8 176045 -222782.3 -216885.3 176721.4 175385 -219253.3 96609.38 -220109.8 176221.3 176573.2 -211309.1 176470.5 176472 -218392.5 0 
+175927.8 -217662.7 176072.5 175763.7 -217795.1 175474.6 -210630.6 175787.3 175823.9 175367.4 176047.2 -222779.3 -216886.2 176723.5 175385.4 -219274.1 96481.66 -220110.1 176222.2 176574.8 -211352.6 176471.7 176472.3 -218395.2 100 
+175928.1 -217668 176072.2 175763 -217800.8 175474.2 -210527.4 175786.1 175824.4 175367.1 176048 -222780.6 -216886.1 176722.9 175385.5 -219304.9 96448.75 -220123.6 176222.4 176575.1 -211372.2 176470.8 176472.3 -218399.3 0 
+175927.1 -217668.2 176071.3 175761.7 -217796.4 175473.6 -210479.1 175785 175824.4 175367.2 176047.8 -222782.1 -216885.2 176720.9 175386 -219339.3 96480.57 -220133.5 176220.9 176574.8 -211379.3 176469.5 176472.6 -218401.3 0 
+175926.3 -217664 176071.9 175761 -217789.9 175473 -210458.6 175786 175824.1 175367.5 176047 -222781.6 -216886.2 176719.7 175387 -219345.9 96461.01 -220139.3 176219.6 176575.3 -211382.9 176470.4 176474.2 -218399.7 0 
+175925 -217656 176072.7 175761 -217793.3 175473 -210441.8 175786.9 175824.3 175367.8 176045.8 -222781.7 -216884.1 176719.3 175387.9 -219300.3 96320.88 -220141.7 176219.6 176575.3 -211404.3 176472.2 176475.8 -218395 0 
+175922.7 -217650.2 176072.1 175761.3 -217796.9 175473.6 -210410.6 175786 175824.8 175367.8 176045.5 -222784.9 -216878.5 176719 175387.9 -219231.3 96110.78 -220133.8 176220.4 176574.4 -211443.4 176471.6 176475.4 -218391.1 0 
+175923.2 -217648.1 176071.4 175761.6 -217791 175474.2 -210366.1 175785.7 175824.7 175367.1 176046.3 -222788.7 -216875.9 176718.7 175387.2 -219193.6 95904.71 -220126 176221 176574.3 -211460.8 176469.8 176474.2 -218391.4 0 
+175926.8 -217643.2 176071.1 175761.3 -217786.6 175474.6 -210327.2 175787.3 175824.3 175366.3 176046.9 -222788 -216876 176719.4 175386.4 -219192.9 95705.7 -220129.3 176220.6 176574.9 -211449.8 176470.1 176473.5 -218392.4 0 
+175928.2 -217638.1 176071 175760.4 -217790.8 175475.1 -210302.8 175788.3 175824.6 175366.7 176047 -222785.2 -216874.5 176720.9 175386.2 -219190.6 95490.06 -220135.8 176220.4 176574.9 -211444.6 176471.1 176473.6 -218391.1 0 
+175927.1 -217639.9 176071.1 175760.5 -217795.5 175475.4 -210300.5 175787.9 175825.2 175367.4 176046.9 -222784 -216873.1 176721 175386.3 -219177.4 95272.24 -220139.5 176221 176574.7 -211447.7 176470.6 176473.9 -218389.9 0 
+175927.5 -217643.8 176071.2 175761.7 -217797.4 175474.5 -210331.1 175786.4 175825.4 175367.1 176046.2 -222784.6 -216876.5 176719.3 175385.7 -219184.7 95082.62 -220142.3 176221.5 176574.1 -211439.6 176469.8 176473.4 -218391.8 0 
+175928.3 -217640.8 176071.3 175762.3 -217798.1 175473.6 -210373.4 175784.9 175825.5 175367.1 176045.3 -222786.5 -216886.5 176718.4 175385 -219214 94910.6 -220142.9 176221.4 176573.6 -211430.5 176470.2 176472.8 -218395.1 0 
+175928.1 -217630.9 176071.9 175761.5 -217795 175473.7 -210396.6 175785 175824.9 175368.2 176045 -222788 -216897.9 176718.8 175384.9 -219225.4 94720.86 -220142.6 176221 176574.2 -211441.1 176470.7 176473.1 -218395.8 0 
+175928.3 -217624.3 176071.9 175760.6 -217786.2 175473.3 -210410.6 175785.5 175823.5 175368.2 176044.7 -222787.7 -216903.8 176719.1 175384.8 -219205.2 94518.72 -220141.6 176220.3 176574.2 -211470.2 176469.2 176473.8 -218393.1 0 
+175927.3 -217626.5 176071.5 175761.6 -217777.5 175472.1 -210427.5 175785.2 175822.7 175366.1 176044.8 -222786.1 -216903.4 176719.2 175384.8 -219173.6 94316.63 -220132.6 176219.6 176573.5 -211504.4 176467.5 176474.4 -218389.1 0 
+175925 -217631.4 176072.1 175763.3 -217776.9 175472.7 -210425.6 175785.6 175822.7 175364.9 176045.9 -222782.5 -216897.7 176719.5 175385.3 -219139.9 94083.08 -220119 176219 176574 -211531.7 176468.9 176475.4 -218385.3 0 
+[... further rows of the 25-column numeric test fixture omitted; the values continue in the same pattern, with the final trigger column mostly 0 and occasionally 100 ...]
diff --git a/mne/io/edf/tests/data/test_uneven_samp.edf b/mne/io/edf/tests/data/test_uneven_samp.edf
new file mode 100755
index 0000000..b0261c0
Binary files /dev/null and b/mne/io/edf/tests/data/test_uneven_samp.edf differ
diff --git a/mne/io/edf/tests/data/test_uneven_samp.mat b/mne/io/edf/tests/data/test_uneven_samp.mat
new file mode 100644
index 0000000..62ed2e8
Binary files /dev/null and b/mne/io/edf/tests/data/test_uneven_samp.mat differ
diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py
index baf4a71..7d68102 100644
--- a/mne/io/edf/tests/test_edf.py
+++ b/mne/io/edf/tests/test_edf.py
@@ -1,66 +1,84 @@
 """Data Equivalence Tests"""
 from __future__ import print_function
 
-# Authors: Teon Brooks <teon at nyu.edu>
+# Authors: Teon Brooks <teon.brooks at gmail.com>
 #          Martin Billinger <martin.billinger at tugraz.at>
+#          Alan Leggitt <alan.leggitt at ucsf.edu>
+#          Alexandre Barachant <alexandre.barachant at gmail.com>
 #
 # License: BSD (3-clause)
 
 import os.path as op
 import inspect
+import warnings
 
 from nose.tools import assert_equal, assert_true
-from numpy.testing import assert_array_almost_equal, assert_array_equal
-from numpy.testing import assert_raises
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_raises, assert_allclose)
 from scipy import io
 import numpy as np
 
+from mne import pick_types, concatenate_raws
 from mne.externals.six import iterbytes
-from mne.utils import _TempDir
-from mne import pick_types
-from mne.io import Raw
-from mne.io import read_raw_edf
+from mne.utils import _TempDir, run_tests_if_main, requires_pandas
+from mne.io import Raw, read_raw_edf, RawArray
+from mne.io.tests.test_raw import _test_concat
 import mne.io.edf.edf as edfmodule
 from mne.event import find_events
 
+warnings.simplefilter('always')
+
 FILE = inspect.getfile(inspect.currentframe())
 data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
-hpts_path = op.join(data_dir, 'biosemi.hpts')
+montage_path = op.join(data_dir, 'biosemi.hpts')
 bdf_path = op.join(data_dir, 'test.bdf')
 edf_path = op.join(data_dir, 'test.edf')
+edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
 bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
 edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
+edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
+edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
+edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
+
+
+eog = ['REOG', 'LEOG', 'IEOG']
+misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
+
 
-tempdir = _TempDir()
+def test_concat():
+    """Test EDF concatenation"""
+    _test_concat(read_raw_edf, bdf_path)
 
 
 def test_bdf_data():
-    """Test reading raw bdf files
-    """
-    raw_py = read_raw_edf(bdf_path, hpts=hpts_path, preload=True)
+    """Test reading raw bdf files"""
+    raw_py = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
+                          misc=misc, preload=True)
+    assert_true('RawEDF' in repr(raw_py))
     picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
     data_py, _ = raw_py[picks]
 
-    print(raw_py)  # to test repr
-    print(raw_py.info)  # to test Info repr
-
     # this .mat was generated using the EEG Lab Biosemi Reader
     raw_eeglab = io.loadmat(bdf_eeglab_path)
     raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
     data_eeglab = raw_eeglab[picks]
-
-    assert_array_almost_equal(data_py, data_eeglab)
+    # BDF is saved as single precision; MATLAB resolution is seven decimals
+    assert_array_almost_equal(data_py, data_eeglab, 8)
 
     # Manually checking that float coordinates are imported
-    assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
-    assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
-    assert_true((raw_py.info['chs'][63]['eeg_loc']).any())
+    assert_true((raw_py.info['chs'][0]['loc']).any())
+    assert_true((raw_py.info['chs'][25]['loc']).any())
+    assert_true((raw_py.info['chs'][63]['loc']).any())
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
 
 
 def test_edf_data():
-    """Test reading raw edf files
-    """
-    raw_py = read_raw_edf(edf_path, stim_channel=139, preload=True)
+    """Test reading raw edf files"""
+    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
+                          preload=True)
 
     picks = pick_types(raw_py.info, meg=False, eeg=True,
                        exclude=['EDF Annotations'])
@@ -74,36 +92,60 @@ def test_edf_data():
     raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
     data_eeglab = raw_eeglab[picks]
 
-    assert_array_almost_equal(data_py, data_eeglab)
+    assert_array_almost_equal(data_py, data_eeglab, 10)
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+    # Test uneven sampling
+    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
+    data_py, _ = raw_py[0]
+    # this .mat was generated using the EEG Lab Biosemi Reader
+    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
+    raw_eeglab = raw_eeglab['data']
+    data_eeglab = raw_eeglab[0]
+
+    # match upsampling
+    upsample = len(data_eeglab) / len(raw_py)
+    data_py = np.repeat(data_py, repeats=upsample)
+    assert_array_equal(data_py, data_eeglab)
 
 
 def test_read_segment():
-    """Test writing raw edf files when preload is False
-    """
-    raw1 = read_raw_edf(edf_path, stim_channel=139, preload=False)
+    """Test writing raw edf files when preload is False"""
+    tempdir = _TempDir()
+    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
     raw1_file = op.join(tempdir, 'test1-raw.fif')
     raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
     raw11 = Raw(raw1_file, preload=True)
     data1, times1 = raw1[:139, :]
     data11, times11 = raw11[:139, :]
-    assert_array_almost_equal(data1, data11, 10)
+    assert_allclose(data1, data11, rtol=1e-6)
     assert_array_almost_equal(times1, times11)
     assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
-
-    raw2 = read_raw_edf(edf_path, stim_channel=139, preload=True)
-    raw2_file = op.join(tempdir, 'test2-raw.fif')
-    raw2.save(raw2_file, overwrite=True)
-    data2, times2 = raw2[:139, :]
-    assert_array_equal(data1, data2)
-    assert_array_equal(times1, times2)
+    data2, times2 = raw1[0, 0:1]
+    assert_array_equal(data2[0], data1[0, 0:1])
+    assert_array_equal(times2, times1[0:1])
+
+    buffer_fname = op.join(tempdir, 'buffer')
+    for preload in (buffer_fname, True, False):  # false here means "delayed"
+        raw2 = read_raw_edf(edf_path, stim_channel=None, preload=preload)
+        if preload is False:
+            raw2.load_data()
+        raw2_file = op.join(tempdir, 'test2-raw.fif')
+        raw2.save(raw2_file, overwrite=True)
+        data2, times2 = raw2[:139, :]
+        assert_allclose(data1, data2, rtol=1e-6)
+        assert_array_equal(times1, times2)
 
     raw1 = Raw(raw1_file, preload=True)
     raw2 = Raw(raw2_file, preload=True)
     assert_array_equal(raw1._data, raw2._data)
 
     # test the _read_segment function by only loading some of the data
-    raw1 = read_raw_edf(edf_path, preload=False)
-    raw2 = read_raw_edf(edf_path, preload=True)
+    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
+    raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)
 
     # select some random range of data to compare
     data1, times1 = raw1[:, 345:417]
@@ -113,19 +155,25 @@ def test_read_segment():
 
 
 def test_append():
-    """Test appending raw edf objects using Raw.append
-    """
-    # Author: Alan Leggitt <alan.leggitt at ucsf.edu>
-    raw = read_raw_edf(bdf_path, hpts=hpts_path, preload=False)
+    """Test appending raw edf objects using Raw.append"""
+    for preload in (True, False):
+        raw = read_raw_edf(bdf_path, preload=False)
+        raw0 = raw.copy()
+        raw1 = raw.copy()
+        raw0.append(raw1)
+        assert_true(2 * len(raw) == len(raw0))
+        assert_allclose(np.tile(raw[:, :][0], (1, 2)), raw0[:, :][0])
+
+    # different types can't combine
+    raw = read_raw_edf(bdf_path, preload=True)
     raw0 = raw.copy()
     raw1 = raw.copy()
-    raw0.append(raw1)
-    assert_true(2 * len(raw) == len(raw0))
+    raw2 = RawArray(raw[:, :][0], raw.info)
+    assert_raises(ValueError, raw.append, raw2)
 
 
 def test_parse_annotation():
-    """Test parsing the tal channel
-    """
+    """Test parsing the tal channel"""
 
     # test the parser
     annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
@@ -147,12 +195,10 @@ def test_parse_annotation():
 
 
 def test_edf_annotations():
-    """Test if events are detected correctly in a typical MNE workflow.
-    """
+    """Test if events are detected correctly in a typical MNE workflow."""
 
     # test an actual file
-    raw = read_raw_edf(edf_path, tal_channel=-1,
-                       hpts=hpts_path, preload=True)
+    raw = read_raw_edf(edf_path, preload=True)
     edf_events = find_events(raw, output='step', shortest_event=0,
                              stim_channel='STI 014')
 
@@ -179,9 +225,9 @@ def test_edf_annotations():
 
 
 def test_write_annotations():
-    """Test writing raw files when annotations were parsed.
-    """
-    raw1 = read_raw_edf(edf_path, tal_channel=-1, preload=True)
+    """Test writing raw files when annotations were parsed."""
+    tempdir = _TempDir()
+    raw1 = read_raw_edf(edf_path, preload=True)
     raw1_file = op.join(tempdir, 'test1-raw.fif')
     raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
     raw11 = Raw(raw1_file, preload=True)
@@ -192,5 +238,38 @@ def test_write_annotations():
     assert_array_almost_equal(times1, times11)
     assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
 
-    assert_raises(RuntimeError, read_raw_edf,
-                  edf_path, tal_channel=-1, preload=False)
+    assert_raises(RuntimeError, read_raw_edf, edf_path, preload=False)
+
+
+def test_edf_stim_channel():
+    """Test stim channel for edf file"""
+    raw = read_raw_edf(edf_stim_channel_path, preload=True,
+                       stim_channel=-1)
+    true_data = np.loadtxt(edf_txt_stim_channel_path).T
+
+    # the EDF writer pads the data if the file is too small
+    _, ns = true_data.shape
+    edf_data = raw._data[:, :ns]
+
+    # assert stim channels are equal
+    assert_array_equal(true_data[-1], edf_data[-1])
+
+    # assert data are equal
+    assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
+
+
+@requires_pandas
+def test_to_data_frame():
+    """Test edf Raw Pandas exporter"""
+    for path in [edf_path, bdf_path]:
+        raw = read_raw_edf(path, stim_channel=None, preload=True)
+        _, times = raw[0, :10]
+        df = raw.to_data_frame()
+        assert_true((df.columns == raw.ch_names).all())
+        assert_array_equal(np.round(times * 1e3), df.index.values[:10])
+        df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
+        assert_true('time' in df.index.names)
+        assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
+
+
+run_tests_if_main()
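
A minimal usage sketch of the reader API these tests now exercise. Only the
read_raw_edf keywords (montage, eog, misc, preload) and concatenate_raws come
from the diff itself; the data directory path is a hypothetical stand-in for
the test fixtures:

    import os.path as op
    from mne import concatenate_raws, pick_types
    from mne.io import read_raw_edf

    data_dir = '/path/to/mne/io/edf/tests/data'  # hypothetical location
    raw = read_raw_edf(op.join(data_dir, 'test.bdf'),
                       montage=op.join(data_dir, 'biosemi.hpts'),
                       eog=['REOG', 'LEOG', 'IEOG'],
                       misc=['EXG1', 'EXG5', 'EXG8', 'M1', 'M2'],
                       preload=True)
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    data, times = raw[picks]  # (n_channels, n_times) array, times in seconds
    raw_concat = concatenate_raws([raw.copy(), raw])
    assert raw_concat.n_times == 2 * raw.n_times  # same check as the test
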
diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py
index 103e5d2..7b38a5b 100644
--- a/mne/io/egi/egi.py
+++ b/mne/io/egi/egi.py
@@ -1,26 +1,19 @@
 # Authors: Denis A. Engemann  <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
 #          simplified BSD-3 license
 
 import datetime
-import os
 import time
 import warnings
 
 import numpy as np
 
-from ..base import _BaseRaw
-from ..meas_info import Info
+from ..base import _BaseRaw, _check_update_montage
+from ..meas_info import _empty_info
 from ..constants import FIFF
 from ...utils import verbose, logger
 
-_other_fields = [
-    'lowpass', 'buffer_size_sec', 'dev_ctf_t',
-    'meas_id', 'subject_info',
-    'dev_head_t', 'line_freq', 'acq_stim', 'proj_id', 'description',
-    'highpass', 'experimenter', 'file_id', 'proj_name',
-    'dig', 'ctf_head_t', 'orig_blocks', 'acq_pars'
-]
-
 
 def _read_header(fid):
     """Read EGI binary header"""
@@ -32,7 +25,10 @@ def _read_header(fid):
     else:
         raise ValueError('Watch out: this does not seem to be a simple '
                          'binary EGI file.')
-    my_fread = lambda *x, **y: np.fromfile(*x, **y)[0]
+
+    def my_fread(*x, **y):
+        return np.fromfile(*x, **y)[0]
+
     info = dict(
         version=version,
         year=my_fread(fid, '>i2', 1),
@@ -126,7 +122,8 @@ def _combine_triggers(data, remapping=None):
 
 
 @verbose
-def read_raw_egi(input_fname, include=None, exclude=None, verbose=None):
+def read_raw_egi(input_fname, montage=None, eog=None, misc=None,
+                 include=None, exclude=None, verbose=None):
     """Read EGI simple binary as raw object
 
     Note. The trigger channel names are based on the
@@ -146,6 +143,16 @@ def read_raw_egi(input_fname, include=None, exclude=None, verbose=None):
     ----------
     input_fname : str
         Path to the raw file.
+    montage : str | None | instance of montage
+        Path or instance of montage containing electrode positions.
+        If None, sensor locations are (0,0,0). See the documentation of
+        :func:`mne.channels.read_montage` for more information.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Default is None.
     include : None | list
        The event channels to be ignored when creating the synthetic
        trigger. Defaults to None.
@@ -160,90 +167,104 @@ def read_raw_egi(input_fname, include=None, exclude=None, verbose=None):
 
     Returns
     -------
-    raw : instance of mne.io.Raw
-        A raw object containing EGI data.
+    raw : Instance of RawEGI
+        A Raw object containing EGI data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
     """
-    return _RawEGI(input_fname, include, exclude, verbose)
+    return RawEGI(input_fname, montage, eog, misc, include, exclude, verbose)
 
 
-class _RawEGI(_BaseRaw):
+class RawEGI(_BaseRaw):
     """Raw object from EGI simple binary file
     """
     @verbose
-    def __init__(self, input_fname, include=None, exclude=None,
-                 verbose=None):
+    def __init__(self, input_fname, montage=None, eog=None, misc=None,
+                 include=None, exclude=None, verbose=None):
         """docstring for __init__"""
+        if eog is None:
+            eog = []
+        if misc is None:
+            misc = []
         with open(input_fname, 'rb') as fid:  # 'rb' important for py3k
             logger.info('Reading EGI header from %s...' % input_fname)
             egi_info = _read_header(fid)
             logger.info('    Reading events ...')
-            _ = _read_events(fid, egi_info)  # update info + jump
+            _read_events(fid, egi_info)  # update info + jump
             logger.info('    Reading data ...')
             # reads events as well
             data = _read_data(fid, egi_info).astype(np.float64)
-            if egi_info['value_range'] and egi_info['bits']:
-                mv = egi_info['value_range'] / 2 ** egi_info['bits']
+            if egi_info['value_range'] != 0 and egi_info['bits'] != 0:
+                cal = egi_info['value_range'] / 2 ** egi_info['bits']
             else:
-                mv = 1e-6
-            data[:egi_info['n_channels']] = data[:egi_info['n_channels']] * mv
+                cal = 1e-6
+            data[:egi_info['n_channels']] = data[:egi_info['n_channels']] * cal
 
         logger.info('    Assembling measurement info ...')
 
-        event_codes = list(egi_info['event_codes'])
-        egi_events = data[-egi_info['n_events']:]
-
-        if include is None:
-            exclude_list = ['sync', 'TREV'] if exclude is None else exclude
-            exclude_inds = [i for i, k in enumerate(event_codes) if k in
-                            exclude_list]
-            more_excludes = []
-            if exclude is None:
-                for ii, event in enumerate(egi_events):
-                    if event.sum() <= 1 and event_codes[ii]:
-                        more_excludes.append(ii)
-            if len(exclude_inds) + len(more_excludes) == len(event_codes):
-                warnings.warn('Did not find any event code with more '
-                              'than one event.', RuntimeWarning)
+        if egi_info['n_events'] > 0:
+            event_codes = list(egi_info['event_codes'])
+            egi_events = data[-egi_info['n_events']:]
+
+            if include is None:
+                exclude_list = ['sync', 'TREV'] if exclude is None else exclude
+                exclude_inds = [i for i, k in enumerate(event_codes) if k in
+                                exclude_list]
+                more_excludes = []
+                if exclude is None:
+                    for ii, event in enumerate(egi_events):
+                        if event.sum() <= 1 and event_codes[ii]:
+                            more_excludes.append(ii)
+                if len(exclude_inds) + len(more_excludes) == len(event_codes):
+                    warnings.warn('Did not find any event code with more '
+                                  'than one event.', RuntimeWarning)
+                else:
+                    exclude_inds.extend(more_excludes)
+
+                exclude_inds.sort()
+                include_ = [i for i in np.arange(egi_info['n_events']) if
+                            i not in exclude_inds]
+                include_names = [k for i, k in enumerate(event_codes)
+                                 if i in include_]
             else:
-                exclude_inds.extend(more_excludes)
-
-            exclude_inds.sort()
-            include_ = [i for i in np.arange(egi_info['n_events']) if
-                        i not in exclude_inds]
-            include_names = [k for i, k in enumerate(event_codes)
-                             if i in include_]
+                include_ = [i for i, k in enumerate(event_codes)
+                            if k in include]
+                include_names = include
+
+            for kk, v in [('include', include_names), ('exclude', exclude)]:
+                if isinstance(v, list):
+                    for k in v:
+                        if k not in event_codes:
+                            raise ValueError('Could not find event named '
+                                             '"%s"' % k)
+                elif v is not None:
+                    raise ValueError('`%s` must be None or of type list' % kk)
+
+            event_ids = np.arange(len(include_)) + 1
+            try:
+                logger.info('    Synthesizing trigger channel "STI 014" ...')
+                logger.info('    Excluding events {%s} ...' %
+                            ", ".join([k for i, k in enumerate(event_codes)
+                                       if i not in include_]))
+                new_trigger = _combine_triggers(egi_events[include_],
+                                                remapping=event_ids)
+                data = np.concatenate([data, new_trigger])
+            except RuntimeError:
+                logger.info('    Found multiple events at the same time '
+                            'sample. Could not create trigger channel.')
+                new_trigger = None
+
+            self.event_id = dict(zip([e for e in event_codes if e in
+                                      include_names], event_ids))
         else:
-            include_ = [i for i, k in enumerate(event_codes) if k in include]
-            include_names = include
-
-        for kk, v in [('include', include_names), ('exclude', exclude)]:
-            if isinstance(v, list):
-                for k in v:
-                    if k not in event_codes:
-                        raise ValueError('Could find event named "%s"' % k)
-            elif v is not None:
-                raise ValueError('`%s` must be None or of type list' % kk)
-
-        event_ids = np.arange(len(include_)) + 1
-        try:
-            logger.info('    Synthesizing trigger channel "STI 014" ...')
-            logger.info('    Excluding events {%s} ...' %
-                        ", ".join([k for i, k in enumerate(event_codes)
-                                   if i not in include_]))
-            new_trigger = _combine_triggers(egi_events[include_],
-                                            remapping=event_ids)
-            data = np.concatenate([data, new_trigger])
-        except RuntimeError:
-            logger.info('    Found multiple events at the same time sample. '
-                        'Could not create trigger channel.')
+            # No events
+            self.event_id = None
             new_trigger = None
-
-        self.event_id = dict(zip([e for e in event_codes if e in
-                                  include_names], event_ids))
-        self._data = data
-        self.verbose = verbose
-        self.info = info = Info(dict((k, None) for k in _other_fields))
-        info['sfreq'] = egi_info['samp_rate']
+        info = _empty_info()
+        info['hpi_subsystem'] = None
+        info['events'], info['hpi_results'], info['hpi_meas'] = [], [], []
+        info['sfreq'] = float(egi_info['samp_rate'])
         info['filename'] = input_fname
         my_time = datetime.datetime(
             egi_info['year'],
@@ -256,17 +277,19 @@ class _RawEGI(_BaseRaw):
         my_timestamp = time.mktime(my_time.timetuple())
         info['meas_date'] = np.array([my_timestamp], dtype=np.float32)
         info['projs'] = []
-        ch_names = ['EEG %03d' % (i + 1) for i in range(egi_info['n_channels'])]
+        ch_names = ['EEG %03d' % (i + 1) for i in
+                    range(egi_info['n_channels'])]
         ch_names.extend(list(egi_info['event_codes']))
         if new_trigger is not None:
             ch_names.append('STI 014')  # our new_trigger
-        info['nchan'] = len(data)
+        info['nchan'] = nchan = len(data)
         info['chs'] = []
         info['ch_names'] = ch_names
         info['bads'] = []
         info['comps'] = []
+        info['custom_ref_applied'] = False
         for ii, ch_name in enumerate(ch_names):
-            ch_info = {'cal': 1.0,
+            ch_info = {'cal': cal,
                        'logno': ii + 1,
                        'scanno': ii + 1,
                        'range': 1.0,
@@ -276,47 +299,32 @@ class _RawEGI(_BaseRaw):
                        'coord_frame': FIFF.FIFFV_COORD_HEAD,
                        'coil_type': FIFF.FIFFV_COIL_EEG,
                        'kind': FIFF.FIFFV_EEG_CH,
-                       'eeg_loc': None,
                        'loc': np.array([0, 0, 0, 1] * 3, dtype='f4')}
+            if ch_name in eog or ii in eog or ii - nchan in eog:
+                ch_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+                ch_info['kind'] = FIFF.FIFFV_EOG_CH
+            if ch_name in misc or ii in misc or ii - nchan in misc:
+                ch_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+                ch_info['kind'] = FIFF.FIFFV_MISC_CH
 
             if len(ch_name) == 4 or ch_name.startswith('STI'):
                 u = {'unit_mul': 0,
+                     'cal': 1,
                      'coil_type': FIFF.FIFFV_COIL_NONE,
                      'unit': FIFF.FIFF_UNIT_NONE,
                      'kind': FIFF.FIFFV_STIM_CH}
                 ch_info.update(u)
             info['chs'].append(ch_info)
 
-        self.preload = True
-        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
-        self._times = np.arange(self.first_samp, self.last_samp + 1,
-                                dtype=np.float64)
-        self._times /= self.info['sfreq']
+        _check_update_montage(info, montage)
+        orig_format = {'>f2': 'single', '>f4': 'double',
+                       '>i2': 'int'}[egi_info['dtype']]
+        super(RawEGI, self).__init__(
+            info, data, filenames=[input_fname], orig_format=orig_format,
+            verbose=verbose)
         logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
                     % (self.first_samp, self.last_samp,
                        float(self.first_samp) / self.info['sfreq'],
                        float(self.last_samp) / self.info['sfreq']))
-
-        # Raw attributes
-        self._filenames = list()
-        self._projector = None
-        self.first_samp = 0
-        self.last_samp = egi_info['n_samples'] - 1
-        self.comp = None  # no compensation for egi
-        self.proj = False
-        self._first_samps = np.array([self.first_samp])
-        self._last_samps = np.array([self.last_samp])
-        self._raw_lengths = np.array([egi_info['n_samples']])
-        self.rawdirs = np.array([])
-        self.cals = np.ones(self.info['nchan'])
         # use information from egi
-        self.orig_format = {'>f4': 'single', '>f4': 'double',
-                            '>i2': 'int'}[egi_info['dtype']]
         logger.info('Ready.')
-
-    def __repr__(self):
-        n_chan = self.info['nchan']
-        data_range = self.last_samp - self.first_samp + 1
-        s = ('%r' % os.path.basename(self.info['filename']),
-             "n_channels x n_times : %s x %s" % (n_chan, data_range))
-        return "<RawEGI  |  %s>" % ', '.join(s)
diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py
index 589df2f..73274bd 100644
--- a/mne/io/egi/tests/test_egi.py
+++ b/mne/io/egi/tests/test_egi.py
@@ -9,15 +9,12 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_true, assert_raises, assert_equal
 
-from mne import find_events
-from mne.io import read_raw_egi
+from mne import find_events, pick_types, concatenate_raws
+from mne.io import read_raw_egi, Raw
 from mne.io.egi import _combine_triggers
-from mne import pick_types
-from mne.io import Raw
 from mne.utils import _TempDir
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
-tempdir = _TempDir()
 
 base_dir = op.join(op.dirname(op.realpath(__file__)), 'data')
 egi_fname = op.join(base_dir, 'test_egi.raw')
@@ -26,9 +23,12 @@ egi_fname = op.join(base_dir, 'test_egi.raw')
 def test_io_egi():
     """Test importing EGI simple binary files"""
     # test default
+    tempdir = _TempDir()
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always', category=RuntimeWarning)
-        _ = read_raw_egi(egi_fname, include=None)
+        raw = read_raw_egi(egi_fname, include=None)
+        assert_true('RawEGI' in repr(raw))
+        raw.load_data()  # currently does nothing
         assert_equal(len(w), 1)
         assert_true(w[0].category == RuntimeWarning)
         msg = 'Did not find any event code with more than one event.'
@@ -36,9 +36,8 @@ def test_io_egi():
 
     include = ['TRSP', 'XXX1']
     raw = read_raw_egi(egi_fname, include=include)
-
-    _ = repr(raw)
-    _ = repr(raw.info)  # analysis:ignore, noqa
+    repr(raw)
+    repr(raw.info)
 
     assert_equal('eeg' in raw, True)
     out_fname = op.join(tempdir, 'test_egi_raw.fif')
@@ -47,8 +46,7 @@ def test_io_egi():
     raw2 = Raw(out_fname, preload=True)
     data1, times1 = raw[:10, :]
     data2, times2 = raw2[:10, :]
-
-    assert_array_almost_equal(data1, data2)
+    assert_array_almost_equal(data1, data2, 9)
     assert_array_almost_equal(times1, times2)
 
     eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
@@ -78,3 +76,7 @@ def test_io_egi():
     for ii, k in enumerate(include, 1):
         assert_true(k in raw.event_id)
         assert_true(raw.event_id[k] == ii)
+
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw.copy(), raw])
+    assert_equal(raw_concat.n_times, 2 * raw.n_times)
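
For reference, the call pattern the test above exercises, hedged as a sketch
(the fixture name comes from the test; the event ids are whatever read_raw_egi
synthesizes for the included codes):

    from mne import find_events
    from mne.io import read_raw_egi

    raw = read_raw_egi('test_egi.raw', include=['TRSP', 'XXX1'])
    print(raw.event_id)  # e.g. {'TRSP': 1, 'XXX1': 2}
    events = find_events(raw, stim_channel='STI 014')
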
diff --git a/mne/io/fiff/__init__.py b/mne/io/fiff/__init__.py
index 084e30a..1a9952e 100644
--- a/mne/io/fiff/__init__.py
+++ b/mne/io/fiff/__init__.py
@@ -1 +1,2 @@
-from .raw import RawFIFF
+from .raw import RawFIF
+from .raw import read_raw_fif
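
After this rename the FIF reader class is RawFIF (one trailing F) and a
functional entry point is exported alongside it; a sketch, assuming a local
raw FIF file:

    from mne.io.fiff import RawFIF, read_raw_fif

    raw = read_raw_fif('sample_audvis_raw.fif', preload=False)  # hypothetical file
    assert isinstance(raw, RawFIF)
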
diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py
index 0ed3348..5d1fc42 100644
--- a/mne/io/fiff/raw.py
+++ b/mne/io/fiff/raw.py
@@ -2,6 +2,7 @@
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -13,19 +14,18 @@ import os.path as op
 import numpy as np
 
 from ..constants import FIFF
-from ..open import fiff_open, _fiff_get_fid
+from ..open import fiff_open, _fiff_get_fid, _get_next_fname
 from ..meas_info import read_meas_info
 from ..tree import dir_tree_find
-from ..tag import read_tag
-from ..proj import proj_equal
+from ..tag import read_tag, read_tag_info
+from ..proj import make_eeg_average_ref_proj, _needs_eeg_average_ref_proj
 from ..compensator import get_current_comp, set_current_comp, make_compensator
-from ..base import _BaseRaw
+from ..base import _BaseRaw, _RawShell, _check_raw_compatibility
 
 from ...utils import check_fname, logger, verbose
-from ...externals.six import string_types
 
 
-class RawFIFF(_BaseRaw):
+class RawFIF(_BaseRaw):
     """Raw data
 
     Parameters
@@ -112,47 +112,27 @@ class RawFIFF(_BaseRaw):
 
         _check_raw_compatibility(raws)
 
+        super(RawFIF, self).__init__(
+            copy.deepcopy(raws[0].info), False,
+            [r.first_samp for r in raws], [r.last_samp for r in raws],
+            [r.filename for r in raws], [r._raw_extras for r in raws],
+            copy.deepcopy(raws[0].comp), raws[0]._orig_comp_grade,
+            raws[0].orig_format, None, verbose=verbose)
+
         # combine information from each raw file to construct self
-        self._filenames = [r.filename for r in raws]
-        self.first_samp = raws[0].first_samp  # meta first sample
-        self._first_samps = np.array([r.first_samp for r in raws])
-        self._last_samps = np.array([r.last_samp for r in raws])
-        self._raw_lengths = np.array([r.n_times for r in raws])
-        self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
-        self.cals = raws[0].cals
-        self.rawdirs = [r.rawdir for r in raws]
-        self.comp = copy.deepcopy(raws[0].comp)
-        self._orig_comp_grade = raws[0]._orig_comp_grade
-        self.info = copy.deepcopy(raws[0].info)
-        self.verbose = verbose
-        self.orig_format = raws[0].orig_format
-        self.proj = False
-        self._add_eeg_ref(add_eeg_ref)
+        if add_eeg_ref and _needs_eeg_average_ref_proj(self.info):
+            eeg_ref = make_eeg_average_ref_proj(self.info, activate=False)
+            self.add_proj(eeg_ref)
 
         if preload:
             self._preload_data(preload)
         else:
             self.preload = False
 
-        self._projector = None
         # setup the SSP projector
-        self.proj = proj
         if proj:
             self.apply_proj()
 
-    def _preload_data(self, preload):
-        """This function actually preloads the data"""
-        if isinstance(preload, string_types):
-            # we will use a memmap: preload is a filename
-            data_buffer = preload
-        else:
-            data_buffer = None
-
-        self._data, self._times = self._read_segment(data_buffer=data_buffer)
-        self.preload = True
-        # close files once data are preloaded
-        self.close()
-
     @verbose
     def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
                        do_check_fname=True, verbose=None):
@@ -170,19 +150,29 @@ class RawFIFF(_BaseRaw):
         ff, tree, _ = fiff_open(fname, preload=whole_file)
         with ff as fid:
             #   Read the measurement info
+
             info, meas = read_meas_info(fid, tree)
 
             #   Locate the data of interest
             raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
             if len(raw_node) == 0:
                 raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
-                if allow_maxshield:
+                if (len(raw_node) == 0):
                     raw_node = dir_tree_find(meas, FIFF.FIFFB_SMSH_RAW_DATA)
-                    if len(raw_node) == 0:
-                        raise ValueError('No raw data in %s' % fname)
-                else:
-                    if len(raw_node) == 0:
+                    msg = ('This file contains raw Internal Active '
+                           'Shielding data. It may be distorted. Elekta '
+                           'recommends it be run through MaxFilter to '
+                           'produce reliable results. Consider closing '
+                           'the file and running MaxFilter on the data.')
+                    if (len(raw_node) == 0):
                         raise ValueError('No raw data in %s' % fname)
+                    elif allow_maxshield:
+                        info['maxshield'] = True
+                        warnings.warn(msg)
+                    else:
+                        msg += (' Use allow_maxshield=True if you are sure you'
+                                ' want to load the data despite this warning.')
+                        raise ValueError(msg)
 
             if len(raw_node) == 1:
                 raw_node = raw_node[0]
@@ -216,7 +206,7 @@ class RawFIFF(_BaseRaw):
             raw.first_samp = first_samp
 
             #   Go through the remaining tags in the directory
-            rawdir = list()
+            raw_extras = list()
             nskip = 0
             orig_format = None
             for k in range(first, nent):
@@ -267,54 +257,19 @@ class RawFIFF(_BaseRaw):
 
                     #  Do we have a skip pending?
                     if nskip > 0:
-                        rawdir.append(dict(ent=None, first=first_samp,
-                                           last=first_samp + nskip * nsamp - 1,
-                                           nsamp=nskip * nsamp))
+                        raw_extras.append(dict(
+                            ent=None, first=first_samp, nsamp=nskip * nsamp,
+                            last=first_samp + nskip * nsamp - 1))
                         first_samp += nskip * nsamp
                         nskip = 0
 
                     #  Add a data buffer
-                    rawdir.append(dict(ent=ent, first=first_samp,
-                                       last=first_samp + nsamp - 1,
-                                       nsamp=nsamp))
+                    raw_extras.append(dict(ent=ent, first=first_samp,
+                                           last=first_samp + nsamp - 1,
+                                           nsamp=nsamp))
                     first_samp += nsamp
 
-            # Try to get the next filename tag for split files
-            nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF)
-            next_fname = None
-            for nodes in nodes_list:
-                next_fname = None
-                for ent in nodes['directory']:
-                    if ent.kind == FIFF.FIFF_REF_ROLE:
-                        tag = read_tag(fid, ent.pos)
-                        role = int(tag.data)
-                        if role != FIFF.FIFFV_ROLE_NEXT_FILE:
-                            next_fname = None
-                            break
-                    if ent.kind == FIFF.FIFF_REF_FILE_NAME:
-                        tag = read_tag(fid, ent.pos)
-                        next_fname = op.join(op.dirname(fname), tag.data)
-                    if ent.kind == FIFF.FIFF_REF_FILE_NUM:
-                        # Some files don't have the name, just the number. So
-                        # we construct the name from the current name.
-                        if next_fname is not None:
-                            continue
-                        next_num = read_tag(fid, ent.pos).data
-                        path, base = op.split(fname)
-                        idx = base.find('.')
-                        idx2 = base.rfind('-')
-                        if idx2 < 0 and next_num == 1:
-                            # this is the first file, which may not be numbered
-                            next_fname = op.join(path, '%s-%d.%s' % (base[:idx],
-                                next_num, base[idx + 1:]))
-                            continue
-                        num_str = base[idx2 + 1:idx]
-                        if not num_str.isdigit():
-                            continue
-                        next_fname = op.join(path, '%s-%d.%s' % (base[:idx2],
-                                             next_num, base[idx + 1:]))
-                if next_fname is not None:
-                    break
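+            # Look for a FIFFB_REF block naming (or numbering) the next
+            # file of a split recording; returns None for the last part.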
+            next_fname = _get_next_fname(fid, fname, tree)
 
         raw.last_samp = first_samp - 1
         raw.orig_format = orig_format
@@ -324,8 +279,8 @@ class RawFIFF(_BaseRaw):
         for k in range(info['nchan']):
             cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
 
-        raw.cals = cals
-        raw.rawdir = rawdir
+        raw._cals = cals
+        raw._raw_extras = raw_extras
         raw.comp = None
         raw._orig_comp_grade = None
 
@@ -348,8 +303,9 @@ class RawFIFF(_BaseRaw):
                     float(raw.last_samp) / info['sfreq']))
 
         # store the original buffer size
-        info['buffer_size_sec'] = (np.median([r['nsamp'] for r in rawdir])
-                                   / info['sfreq'])
+        info['buffer_size_sec'] = (np.median([r['nsamp']
+                                              for r in raw_extras]) /
+                                   info['sfreq'])
 
         raw.info = info
         raw.verbose = verbose
@@ -358,121 +314,52 @@ class RawFIFF(_BaseRaw):
 
         return raw, next_fname
 
-    def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
-                      verbose=None, projector=None):
-        """Read a chunk of raw data
-
-        Parameters
-        ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
-        sel : array, optional
-            Indices of channels to select.
-        data_buffer : array or str, optional
-            numpy array to fill with data read, must have the correct shape.
-            If str, a np.memmap with the correct data type will be used
-            to store the data.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
-        projector : array
-            SSP operator to apply to the data.
-
-        Returns
-        -------
-        data : array, [channels x samples]
-           the data matrix (channels x samples).
-        times : array, [samples]
-            returns the time values corresponding to the samples.
-        """
-        #  Initial checks
-        start = int(start)
-        stop = self.n_times if stop is None else min([int(stop), self.n_times])
-
-        if start >= stop:
-            raise ValueError('No data in this range')
-
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(self.info['sfreq']),
-                     (stop - 1) / float(self.info['sfreq'])))
-
-        #  Initialize the data and calibration vector
-        nchan = self.info['nchan']
-
-        n_sel_channels = nchan if sel is None else len(sel)
-        # convert sel to a slice if possible for efficiency
-        if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
-            sel = slice(sel[0], sel[-1] + 1)
-        idx = slice(None, None, None) if sel is None else sel
-        data_shape = (n_sel_channels, stop - start)
-        if isinstance(data_buffer, np.ndarray):
-            if data_buffer.shape != data_shape:
-                raise ValueError('data_buffer has incorrect shape')
-            data = data_buffer
-        else:
-            data = None  # we will allocate it later, once we know the type
-
-        mult = list()
-        for ri in range(len(self._raw_lengths)):
-            mult.append(np.diag(self.cals.ravel()))
-            if self.comp is not None:
-                mult[ri] = np.dot(self.comp, mult[ri])
-            if projector is not None:
-                mult[ri] = np.dot(projector, mult[ri])
-            mult[ri] = mult[ri][idx]
-
-        # deal with having multiple files accessed by the raw object
-        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
-                                                   dtype='int')))
-        cumul_lens = np.cumsum(cumul_lens)
-        files_used = np.logical_and(np.less(start, cumul_lens[1:]),
-                                    np.greater_equal(stop - 1,
-                                                     cumul_lens[:-1]))
-
-        first_file_used = False
-        s_off = 0
-        dest = 0
-        if isinstance(idx, slice):
-            cals = self.cals.ravel()[idx][:, np.newaxis]
-        else:
-            cals = self.cals.ravel()[:, np.newaxis]
-
-        for fi in np.nonzero(files_used)[0]:
-            start_loc = self._first_samps[fi]
-            # first iteration (only) could start in the middle somewhere
-            if not first_file_used:
-                first_file_used = True
-                start_loc += start - cumul_lens[fi]
-            stop_loc = np.min([stop - 1 - cumul_lens[fi] +
-                               self._first_samps[fi], self._last_samps[fi]])
-            if start_loc < self._first_samps[fi]:
-                raise ValueError('Bad array indexing, could be a bug')
-            if stop_loc > self._last_samps[fi]:
-                raise ValueError('Bad array indexing, could be a bug')
-            if stop_loc < start_loc:
-                raise ValueError('Bad array indexing, could be a bug')
-            len_loc = stop_loc - start_loc + 1
-            fid = _fiff_get_fid(self._filenames[fi])
-
-            for this in self.rawdirs[fi]:
-
+    @property
+    def _dtype(self):
+        """Get the dtype to use to store data from disk"""
+        if self._dtype_ is not None:
+            return self._dtype_
+        dtype = None
+        for raw_extra, filename in zip(self._raw_extras, self._filenames):
+            for this in raw_extra:
+                if this['ent'] is not None:
+                    with _fiff_get_fid(filename) as fid:
+                        fid.seek(this['ent'].pos, 0)
+                        tag = read_tag_info(fid)
+                        if tag is not None:
+                            if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT,
+                                            FIFF.FIFFT_COMPLEX_DOUBLE):
+                                dtype = np.complex128
+                            else:
+                                dtype = np.float64
+                    if dtype is not None:
+                        break
+            if dtype is not None:
+                break
+        if dtype is None:
+            raise RuntimeError('bug in reading: no data tags found')
+        self._dtype_ = dtype
+        return dtype
+
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a segment of data from a file"""
+        with _fiff_get_fid(self._filenames[fi]) as fid:
+            for this in self._raw_extras[fi]:
                 #  Do we need this buffer
-                if this['last'] >= start_loc:
+                if this['last'] >= start:
                     #  The picking logic is a bit complicated
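+                    # (Trace letters logged below: 'W' = whole buffer
+                    #  needed, 'M' = a slice from the middle, 'B' = from
+                    #  the beginning of the buffer.)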
-                    if stop_loc > this['last'] and start_loc < this['first']:
+                    if stop > this['last'] and start < this['first']:
                         #    We need the whole buffer
                         first_pick = 0
                         last_pick = this['nsamp']
                         logger.debug('W')
 
-                    elif start_loc >= this['first']:
-                        first_pick = start_loc - this['first']
-                        if stop_loc <= this['last']:
+                    elif start >= this['first']:
+                        first_pick = start - this['first']
+                        if stop <= this['last']:
                             #   Something from the middle
-                            last_pick = this['nsamp'] + stop_loc - this['last']
+                            last_pick = this['nsamp'] + stop - this['last']
                             logger.debug('M')
                         else:
                             #   From the middle to the end
@@ -481,7 +368,7 @@ class RawFIFF(_BaseRaw):
                     else:
                         #    From the beginning to the middle
                         first_pick = 0
-                        last_pick = stop_loc - this['first'] + 1
+                        last_pick = stop - this['first'] + 1
                         logger.debug('B')
 
                     #   Now we are ready to pick
@@ -490,109 +377,111 @@ class RawFIFF(_BaseRaw):
                         # only read data if it exists
                         if this['ent'] is not None:
                             one = read_tag(fid, this['ent'].pos,
-                                           shape=(this['nsamp'], nchan),
+                                           shape=(this['nsamp'],
+                                                  self.info['nchan']),
                                            rlims=(first_pick, last_pick)).data
-                            if np.isrealobj(one):
-                                dtype = np.float
-                            else:
-                                dtype = np.complex128
-                            one.shape = (picksamp, nchan)
-                            one = one.T.astype(dtype)
-                            # use proj + cal factors in mult
+                            one.shape = (picksamp, self.info['nchan'])
+                            one = one.T.astype(data.dtype)
+                            data_view = data[:, offset:(offset + picksamp)]
                             if mult is not None:
-                                one[idx] = np.dot(mult[fi], one)
-                            else:  # apply just the calibration factors
-                                # this logic is designed to limit memory copies
+                                data_view[:] = np.dot(mult[fi], one)
+                            else:  # cals is not None
                                 if isinstance(idx, slice):
-                                    # This is a view operation, so it's fast
-                                    one[idx] *= cals
+                                    data_view[:] = one[idx]
                                 else:
-                                    # Extra operations are actually faster here
-                                    # than creating a new array
-                                    # (fancy indexing)
-                                    one *= cals
-
-                            # if not already done, allocate array with
-                            # right type
-                            data = _allocate_data(data, data_buffer,
-                                                  data_shape, dtype)
-                            if isinstance(idx, slice):
-                                # faster to slice in data than doing
-                                # one = one[idx] sooner
-                                data[:, dest:(dest + picksamp)] = one[idx]
-                            else:
-                                # faster than doing one = one[idx]
-                                data_view = data[:, dest:(dest + picksamp)]
-                                for ii, ix in enumerate(idx):
-                                    data_view[ii] = one[ix]
-                        dest += picksamp
+                                    # faster to iterate than doing
+                                    # one = one[idx]
+                                    for ii, ix in enumerate(idx):
+                                        data_view[ii] = one[ix]
+                                data_view *= cals
+                        offset += picksamp
 
                 #   Done?
-                if this['last'] >= stop_loc:
-                    # if not already done, allocate array with float dtype
-                    data = _allocate_data(data, data_buffer, data_shape,
-                                          np.float)
+                if this['last'] >= stop:
                     break
 
-            fid.close()  # clean it up
-            s_off += len_loc
-            # double-check our math
-            if not s_off == dest:
-                raise ValueError('Incorrect file reading')
-
-        logger.info('[done]')
-        times = np.arange(start, stop) / self.info['sfreq']
+    def fix_mag_coil_types(self):
+        """Fix Elekta magnetometer coil types
 
-        return data, times
+        Returns
+        -------
+        raw : instance of Raw
+            The raw object. Operates in place.
+
+        Notes
+        -----
+        This function changes magnetometer coil types 3022 (T1: SQ20483N) and
+        3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
+        records in the info structure.
+
+        Neuromag Vectorview systems can contain magnetometers with two
+        different coil sizes (3022 and 3023 vs. 3024). The systems
+        incorporating coils of type 3024 were introduced last and are used at
+        the majority of MEG sites. At some sites with 3024 magnetometers,
+        the data files still define the magnetometers as type 3022 to
+        remain compatible with older versions of the Neuromag software.
+        Coil type 3024 is fully supported both in MNE and in the present
+        version of the Neuromag software, so it is now safe to upgrade
+        the data files to use the true coil type.
+
+        .. note:: The effect of the difference between the coil sizes on the
+                  current estimates computed by the MNE software is very small.
+                  Therefore the use of mne_fix_mag_coil_types is not mandatory.
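+
+        A minimal usage sketch (the file name here is hypothetical)::
+
+            >>> raw = Raw('sample_audvis_raw.fif')  # doctest: +SKIP
+            >>> raw.fix_mag_coil_types()            # doctest: +SKIP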
+        """
+        from ...channels import fix_mag_coil_types
+        fix_mag_coil_types(self.info)
+        return self
 
 
-def _allocate_data(data, data_buffer, data_shape, dtype):
-    if data is None:
-        # if not already done, allocate array with right type
-        if isinstance(data_buffer, string_types):
-            # use a memmap
-            data = np.memmap(data_buffer, mode='w+',
-                             dtype=dtype, shape=data_shape)
-        else:
-            data = np.zeros(data_shape, dtype=dtype)
-    return data
+def read_raw_fif(fnames, allow_maxshield=False, preload=False,
+                 proj=False, compensation=None, add_eeg_ref=True,
+                 verbose=None):
+    """Reader function for Raw FIF data
 
+    Parameters
+    ----------
+    fnames : list or str
+        A list of the raw files to treat as a Raw instance, or a single
+        raw file. For files that have automatically been split, only the
+        name of the first file has to be specified. Filenames should end
+        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz,
+        raw_tsss.fif or raw_tsss.fif.gz.
+    allow_maxshield : bool (default False)
+        If True, allow loading of data that has been processed with
+        MaxShield. MaxShield-processed data should generally not be
+        loaded directly, but should first be processed using SSS.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
+    proj : bool
+        Apply the signal space projection (SSP) operators present in
+        the file to the data. Note: Once the projectors have been
+        applied, they can no longer be removed. It is usually not
+        recommended to apply the projectors at this point as they are
+        applied automatically later on (e.g. when computing inverse
+        solutions).
+    compensation : None | int
+        If None, the compensation in the data is not modified.
+        If set to n, e.g. 3, apply gradient compensation of grade n as
+        for CTF systems.
+    add_eeg_ref : bool
+        If True, add average EEG reference projector (if it's not already
+        present).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
-class _RawShell():
-    """Used for creating a temporary raw object"""
-    def __init__(self):
-        self.first_samp = None
-        self.last_samp = None
-        self.cals = None
-        self.rawdir = None
-        self._projector = None
+    Returns
+    -------
+    raw : instance of RawFIF
+        A Raw object containing FIF data.
 
-    @property
-    def n_times(self):
-        return self.last_samp - self.first_samp + 1
-
-
-def _check_raw_compatibility(raw):
-    """Check to make sure all instances of Raw
-    in the input list raw have compatible parameters"""
-    for ri in range(1, len(raw)):
-        if not raw[ri].info['nchan'] == raw[0].info['nchan']:
-            raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
-        if not raw[ri].info['bads'] == raw[0].info['bads']:
-            raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
-        if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
-            raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
-        if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
-            raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
-        if not all(raw[ri].cals == raw[0].cals):
-            raise ValueError('raw[%d].cals must match' % ri)
-        if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
-            raise ValueError('SSP projectors in raw files must be the same')
-        if not all(proj_equal(p1, p2) for p1, p2 in
-                   zip(raw[0].info['projs'], raw[ri].info['projs'])):
-            raise ValueError('SSP projectors in raw files must be the same')
-    if not all([r.orig_format == raw[0].orig_format for r in raw]):
-        warnings.warn('raw files do not all have the same data format, '
-                      'could result in precision mismatch. Setting '
-                      'raw.orig_format="unknown"')
-        raw[0].orig_format = 'unknown'
+    Notes
+    -----
+    .. versionadded:: 0.9.0
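+
+    A minimal usage sketch (the file name is hypothetical)::
+
+        >>> raw = read_raw_fif('sample_audvis_raw.fif')  # doctest: +SKIP
+        >>> data, times = raw[:, :100]                   # doctest: +SKIP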
+    """
+    return RawFIF(fnames=fnames, allow_maxshield=allow_maxshield,
+                  preload=preload, proj=proj, compensation=compensation,
+                  add_eeg_ref=add_eeg_ref, verbose=verbose)
diff --git a/mne/io/fiff/tests/test_raw.py b/mne/io/fiff/tests/test_raw.py
index 8302b74..e3f561e 100644
--- a/mne/io/fiff/tests/test_raw.py
+++ b/mne/io/fiff/tests/test_raw.py
@@ -10,28 +10,33 @@ import os.path as op
 import glob
 from copy import deepcopy
 import warnings
+import itertools as itt
 
 import numpy as np
 from numpy.testing import (assert_array_almost_equal, assert_array_equal,
-                           assert_allclose)
-from nose.tools import (assert_true, assert_raises, assert_equal,
-                        assert_not_equal)
+                           assert_allclose, assert_equal)
+from nose.tools import assert_true, assert_raises, assert_not_equal
 
-from mne import pick_types, pick_channels
+from mne.datasets import testing
 from mne.io.constants import FIFF
-from mne.io import (Raw, concatenate_raws,
-                    get_chpi_positions, set_eeg_reference)
-from mne import concatenate_events, find_events, equalize_channels
-from mne.utils import (_TempDir, requires_nitime, requires_pandas,
-                       requires_mne, run_subprocess)
-from mne.externals.six.moves import zip
-from mne.externals.six.moves import cPickle as pickle
+from mne.io import Raw, RawArray, concatenate_raws, read_raw_fif
+from mne.io.tests.test_raw import _test_concat
+from mne import (concatenate_events, find_events, equalize_channels,
+                 compute_proj_raw, pick_types, pick_channels, create_info)
+from mne.utils import (_TempDir, requires_pandas, slow_test,
+                       requires_mne, run_subprocess, run_tests_if_main)
+from mne.externals.six.moves import zip, cPickle as pickle
+from mne.io.proc_history import _get_sss_rank
+from mne.io.pick import _picks_by_type
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
+data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+
 base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
-fif_fname = op.join(base_dir, 'test_raw.fif')
-fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
+test_fif_fname = op.join(base_dir, 'test_raw.fif')
+test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
 ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
 ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
 fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
@@ -40,29 +45,69 @@ bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
 hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
 hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
 
-tempdir = _TempDir()
+
+def test_fix_types():
+    """Test fixing of channel types
+    """
+    for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
+                          (ctf_fname, False)):
+        raw = Raw(fname)
+        mag_picks = pick_types(raw.info, meg='mag')
+        other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
+        # we don't actually have any files suffering from this problem, so
+        # fake it
+        if change:
+            for ii in mag_picks:
+                raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
+        orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        raw.fix_mag_coil_types()
+        new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
+        if not change:
+            assert_array_equal(orig_types, new_types)
+        else:
+            assert_array_equal(orig_types[other_picks], new_types[other_picks])
+            assert_true((orig_types[mag_picks] != new_types[mag_picks]).all())
+            assert_true((new_types[mag_picks] ==
+                         FIFF.FIFFV_COIL_VV_MAG_T3).all())
+
+
+def test_concat():
+    """Test RawFIF concatenation
+    """
+    # we trim the file to save lots of memory and some time
+    tempdir = _TempDir()
+    raw = read_raw_fif(test_fif_fname)
+    raw.crop(0, 2., copy=False)
+    test_name = op.join(tempdir, 'test_raw.fif')
+    raw.save(test_name)
+    # now run the standard test
+    _test_concat(read_raw_fif, test_name)
 
 
+@testing.requires_testing_data
 def test_hash_raw():
     """Test hashing raw objects
     """
-    raw = Raw(fif_fname)
+    raw = read_raw_fif(fif_fname)
     assert_raises(RuntimeError, raw.__hash__)
-    raw = Raw(fif_fname, preload=True).crop(0, 0.5)
-    raw_2 = Raw(fif_fname, preload=True).crop(0, 0.5)
+    raw = Raw(fif_fname).crop(0, 0.5, False)
+    raw.load_data()
+    raw_2 = Raw(fif_fname).crop(0, 0.5, False)
+    raw_2.load_data()
     assert_equal(hash(raw), hash(raw_2))
     # do NOT use assert_equal here, failing output is terrible
-    assert_true(pickle.dumps(raw) == pickle.dumps(raw_2))
+    assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
 
     raw_2._data[0, 0] -= 1
     assert_not_equal(hash(raw), hash(raw_2))
 
 
+@testing.requires_testing_data
 def test_subject_info():
     """Test reading subject information
     """
-    raw = Raw(fif_fname)
-    raw.crop(0, 1, False)
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 1, False)
     assert_true(raw.info['subject_info'] is None)
     # fake some subject data
     keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
@@ -85,23 +130,7 @@ def test_subject_info():
     assert_true(raw_read.info.get('subject_info') is None)
 
 
-def test_get_chpi():
-    """Test CHPI position computation
-    """
-    trans0, rot0, _ = get_chpi_positions(hp_fname)
-    raw = Raw(hp_fif_fname)
-    out = get_chpi_positions(raw)
-    trans1, rot1, t1 = out
-    trans1 = trans1[2:]
-    rot1 = rot1[2:]
-    # these will not be exact because they don't use equiv. time points
-    assert_allclose(trans0, trans1, atol=1e-6, rtol=1e-1)
-    assert_allclose(rot0, rot1, atol=1e-6, rtol=1e-1)
-    # run through input checking
-    assert_raises(TypeError, get_chpi_positions, 1)
-    assert_raises(ValueError, get_chpi_positions, hp_fname, [1])
-
-
+@testing.requires_testing_data
 def test_copy_append():
     """Test raw copying and appending combinations
     """
@@ -109,47 +138,74 @@ def test_copy_append():
     raw_full = Raw(fif_fname)
     raw_full.append(raw)
     data = raw_full[:, :][0]
-    assert_true(data.shape[1] == 2 * raw._data.shape[1])
+    assert_equal(data.shape[1], 2 * raw._data.shape[1])
 
 
+@slow_test
+@testing.requires_testing_data
 def test_rank_estimation():
     """Test raw rank estimation
     """
-    raw = Raw(fif_fname)
-    picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
-    n_meg = len(picks_meg)
-    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
-    n_eeg = len(picks_eeg)
-    raw = Raw(fif_fname, preload=True)
-    assert_array_equal(raw.estimate_rank(), n_meg + n_eeg)
-    assert_array_equal(raw.estimate_rank(picks=picks_eeg), n_eeg)
-    raw = Raw(fif_fname, preload=False)
-    raw.apply_proj()
-    n_proj = len(raw.info['projs'])
-    assert_array_equal(raw.estimate_rank(tstart=10, tstop=20),
-                       n_meg + n_eeg - n_proj)
+    iter_tests = itt.product(
+        [fif_fname, hp_fif_fname],  # second file is SSS-processed
+        ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
+    )
+    for fname, scalings in iter_tests:
+        raw = Raw(fname)
+        (_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
+                                                        meg_combined=True)
+        n_meg = len(picks_meg)
+        n_eeg = len(picks_eeg)
+
+        raw = Raw(fname, preload=True)
+        if 'proc_history' not in raw.info:
+            expected_rank = n_meg + n_eeg
+        else:
+            mf = raw.info['proc_history'][0]['max_info']
+            expected_rank = _get_sss_rank(mf) + n_eeg
+        assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
+
+        assert_array_equal(raw.estimate_rank(picks=picks_eeg,
+                                             scalings=scalings),
+                           n_eeg)
+
+        raw = Raw(fname, preload=False)
+        if 'sss' in fname:
+            tstart, tstop = 0., 30.
+            raw.add_proj(compute_proj_raw(raw))
+            raw.apply_proj()
+        else:
+            tstart, tstop = 10., 20.
+
+        raw.apply_proj()
+        n_proj = len(raw.info['projs'])
+
+        assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
+                                             scalings=scalings),
+                           expected_rank - (1 if 'sss' in fname else n_proj))
 
 
+@testing.requires_testing_data
 def test_output_formats():
     """Test saving and loading raw data using multiple formats
     """
+    tempdir = _TempDir()
     formats = ['short', 'int', 'single', 'double']
     tols = [1e-4, 1e-7, 1e-7, 1e-15]
 
     # let's fake a raw file with different formats
-    raw = Raw(fif_fname, preload=True)
-    raw.crop(0, 1, copy=False)
+    raw = Raw(test_fif_fname).crop(0, 1, copy=False)
 
     temp_file = op.join(tempdir, 'raw.fif')
-    for ii, (format, tol) in enumerate(zip(formats, tols)):
+    for ii, (fmt, tol) in enumerate(zip(formats, tols)):
         # Let's test the overwriting error throwing while we're at it
         if ii > 0:
-            assert_raises(IOError, raw.save, temp_file, format=format)
-        raw.save(temp_file, format=format, overwrite=True)
+            assert_raises(IOError, raw.save, temp_file, fmt=fmt)
+        raw.save(temp_file, fmt=fmt, overwrite=True)
         raw2 = Raw(temp_file)
         raw2_data = raw2[:, :][0]
-        assert_allclose(raw2_data, raw._data, rtol=tol, atol=1e-25)
-        assert_true(raw2.orig_format == format)
+        assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
+        assert_equal(raw2.orig_format, fmt)
 
 
 def _compare_combo(raw, new, times, n_times):
@@ -159,11 +215,16 @@ def _compare_combo(raw, new, times, n_times):
         assert_allclose(orig, new[:, ti][0])
 
 
+@slow_test
+@testing.requires_testing_data
 def test_multiple_files():
     """Test loading multiple files simultaneously
     """
     # split file
-    raw = Raw(fif_fname, preload=True).crop(0, 10)
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 10, False)
+    raw.load_data()
+    raw.load_data()  # second call should be a no-op
     split_size = 3.  # in seconds
     sfreq = raw.info['sfreq']
     nsamp = (raw.last_samp - raw.first_samp)
@@ -171,7 +232,7 @@ def test_multiple_files():
     tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
     tmaxs /= sfreq
     tmins /= sfreq
-    assert_equal(raw.n_times, len(raw._times))
+    assert_equal(raw.n_times, len(raw.times))
 
     # going in reverse order so the last fname is the first file (need later)
     raws = [None] * len(tmins)
@@ -187,8 +248,8 @@ def test_multiple_files():
     assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
     all_raw_1, events1 = concatenate_raws(raws, preload=False,
                                           events_list=events)
-    assert_true(raw.first_samp == all_raw_1.first_samp)
-    assert_true(raw.last_samp == all_raw_1.last_samp)
+    assert_equal(raw.first_samp, all_raw_1.first_samp)
+    assert_equal(raw.last_samp, all_raw_1.last_samp)
     assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
     raws[0] = Raw(fname)
     all_raw_2 = concatenate_raws(raws, preload=True)
@@ -202,7 +263,7 @@ def test_multiple_files():
 
     # test various methods of combining files
     raw = Raw(fif_fname, preload=True)
-    n_times = len(raw._times)
+    n_times = raw.n_times
     # make sure that all our data match
     times = list(range(0, 2 * n_times, 999))
     # add potentially problematic points
@@ -216,14 +277,14 @@ def test_multiple_files():
     _compare_combo(raw, raw_combo, times, n_times)
     assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
     assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
-    assert_true(raw[:, :][0].shape[1] * 2 == raw_combo0[:, :][0].shape[1])
-    assert_true(raw_combo0[:, :][0].shape[1] == len(raw_combo0._times))
+    assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
+    assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
 
     # with all data preloaded, result should be preloaded
     raw_combo = Raw(fif_fname, preload=True)
     raw_combo.append(Raw(fif_fname, preload=True))
     assert_true(raw_combo.preload is True)
-    assert_true(len(raw_combo._times) == raw_combo._data.shape[1])
+    assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
     _compare_combo(raw, raw_combo, times, n_times)
 
     # with any data not preloaded, don't set result as preloaded
@@ -270,18 +331,22 @@ def test_multiple_files():
     assert_array_equal(events, events2)
 
     # check out the len method
-    assert_true(len(raw) == raw.n_times)
-    assert_true(len(raw) == raw.last_samp - raw.first_samp + 1)
+    assert_equal(len(raw), raw.n_times)
+    assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
 
 
+@testing.requires_testing_data
 def test_split_files():
     """Test writing and reading of split raw files
     """
+    tempdir = _TempDir()
     raw_1 = Raw(fif_fname, preload=True)
+    assert_allclose(raw_1.info['buffer_size_sec'], 10., atol=1e-2)  # sfreq rounding
     split_fname = op.join(tempdir, 'split_raw.fif')
     raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
 
     raw_2 = Raw(split_fname)
+    assert_allclose(raw_2.info['buffer_size_sec'], 1., atol=1e-2)  # sfreq rounding
     data_1, times_1 = raw_1[:, :]
     data_2, times_2 = raw_2[:, :]
     assert_array_equal(data_1, data_2)
@@ -301,10 +366,11 @@ def test_split_files():
 def test_load_bad_channels():
     """Test reading/writing of bad channels
     """
+    tempdir = _TempDir()
     # Load correctly marked file (manually done in mne_process_raw)
     raw_marked = Raw(fif_bad_marked_fname)
     correct_bads = raw_marked.info['bads']
-    raw = Raw(fif_fname)
+    raw = Raw(test_fif_fname)
     # Make sure it starts clean
     assert_array_equal(raw.info['bads'], [])
 
@@ -338,12 +404,16 @@ def test_load_bad_channels():
     assert_equal([], raw_new.info['bads'])
 
 
+@slow_test
+@testing.requires_testing_data
 def test_io_raw():
     """Test IO for raw data (Neuromag + CTF + gz)
     """
+    tempdir = _TempDir()
     # test unicode io
     for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
         with Raw(fif_fname) as r:
+            assert_true('Raw' in repr(r))
             desc1 = r.info['description'] = chars.decode('utf-8')
             temp_file = op.join(tempdir, 'raw.fif')
             r.save(temp_file, overwrite=True)
@@ -352,8 +422,8 @@ def test_io_raw():
             assert_equal(desc1, desc2)
 
     # Let's construct a simple test for IO first
-    raw = Raw(fif_fname, preload=True)
-    raw.crop(0, 3.5)
+    raw = Raw(fif_fname).crop(0, 3.5, False)
+    raw.load_data()
     # put in some data that we know the values of
     data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
     raw._data[:, :] = data
@@ -362,14 +432,14 @@ def test_io_raw():
     raw.save(fname, buffer_size_sec=1.0)
     # read it in, make sure the whole thing matches
     raw = Raw(fname)
-    assert_true(np.allclose(data, raw[:, :][0], 1e-6, 1e-20))
+    assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
     # let's read portions across the 1-sec tag boundary, too
     inds = raw.time_as_index([1.75, 2.25])
     sl = slice(inds[0], inds[1])
-    assert_true(np.allclose(data[:, sl], raw[:, sl][0], 1e-6, 1e-20))
+    assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
 
     # now let's do some real I/O
-    fnames_in = [fif_fname, fif_gz_fname, ctf_fname]
+    fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
     fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
     for fname_in, fname_out in zip(fnames_in, fnames_out):
         fname_out = op.join(tempdir, fname_out)
@@ -395,7 +465,7 @@ def test_io_raw():
         # Writing with drop_small_buffer True
         raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
                  drop_small_buffer=True, overwrite=True)
-        raw2 = Raw(fname_out, preload=True)
+        raw2 = Raw(fname_out)
 
         sel = pick_channels(raw2.ch_names, meg_ch_names)
         data2, times2 = raw2[sel, :]
@@ -405,14 +475,14 @@ def test_io_raw():
         raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
 
         if fname_in == fif_fname or fname_in == fif_fname + '.gz':
-            assert_true(len(raw.info['dig']) == 146)
+            assert_equal(len(raw.info['dig']), 146)
 
         raw2 = Raw(fname_out)
 
         sel = pick_channels(raw2.ch_names, meg_ch_names)
         data2, times2 = raw2[sel, :]
 
-        assert_true(np.allclose(data, data2, 1e-6, 1e-20))
+        assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
         assert_allclose(times, times2)
         assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
 
@@ -434,8 +504,8 @@ def test_io_raw():
                 else:
                     to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                 for raw_ in [raw, raw2]:
-                    assert_true(raw_.info[trans]['from'] == from_id)
-                    assert_true(raw_.info[trans]['to'] == to_id)
+                    assert_equal(raw_.info[trans]['from'], from_id)
+                    assert_equal(raw_.info[trans]['to'], to_id)
 
         if fname_in == fif_fname or fname_in == fif_fname + '.gz':
             assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
@@ -449,9 +519,11 @@ def test_io_raw():
     assert_true(len(w) > 0)  # len(w) should be 2 but Travis sometimes has more
 
 
+@testing.requires_testing_data
 def test_io_complex():
     """Test IO with complex data types
     """
+    tempdir = _TempDir()
     dtypes = [np.complex64, np.complex128]
 
     raw = Raw(fif_fname, preload=True)
@@ -486,6 +558,7 @@ def test_io_complex():
         assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
 
 
+@testing.requires_testing_data
 def test_getitem():
     """Test getitem/indexing of Raw
     """
@@ -504,9 +577,11 @@ def test_getitem():
         assert_array_equal(times, times1)
 
 
+@testing.requires_testing_data
 def test_proj():
     """Test SSP proj operations
     """
+    tempdir = _TempDir()
     for proj in [True, False]:
         raw = Raw(fif_fname, preload=False, proj=proj)
         assert_true(all(p['active'] == proj for p in raw.info['projs']))
@@ -525,11 +600,11 @@ def test_proj():
             projs = deepcopy(raw.info['projs'])
             n_proj = len(raw.info['projs'])
             raw.del_proj(0)
-            assert_true(len(raw.info['projs']) == n_proj - 1)
+            assert_equal(len(raw.info['projs']), n_proj - 1)
             raw.add_proj(projs, remove_existing=False)
-            assert_true(len(raw.info['projs']) == 2 * n_proj - 1)
+            assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
             raw.add_proj(projs, remove_existing=True)
-            assert_true(len(raw.info['projs']) == n_proj)
+            assert_equal(len(raw.info['projs']), n_proj)
 
     # test apply_proj() with and without preload
     for preload in [True, False]:
@@ -560,10 +635,23 @@ def test_proj():
         assert_allclose(data_proj_1, data_proj_2)
         assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
 
+    tempdir = _TempDir()
+    out_fname = op.join(tempdir, 'test_raw.fif')
+    raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
+    raw.pick_types(meg=False, eeg=True)
+    raw.info['projs'] = [raw.info['projs'][-1]]
+    raw._data.fill(0)
+    raw._data[-1] = 1.
+    raw.save(out_fname)
+    raw = read_raw_fif(out_fname, proj=True, preload=False)
+    assert_allclose(raw[:, :][0][:1], raw[0, :][0])
+
 
+@testing.requires_testing_data
 def test_preload_modify():
     """Test preloading and modifying data
     """
+    tempdir = _TempDir()
     for preload in [False, True, 'memmap.dat']:
         raw = Raw(fif_fname, preload=preload)
 
@@ -589,10 +677,13 @@ def test_preload_modify():
         assert_allclose(data, data_new)
 
 
+@slow_test
+@testing.requires_testing_data
 def test_filter():
     """Test filtering (FIR and IIR) and Raw.apply_function interface
     """
-    raw = Raw(fif_fname, preload=True).crop(0, 7, False)
+    raw = Raw(fif_fname).crop(0, 7, False)
+    raw.load_data()
     sig_dec = 11
     sig_dec_notch = 12
     sig_dec_notch_fit = 12
@@ -660,6 +751,7 @@ def test_filter():
     assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
 
 
+@testing.requires_testing_data
 def test_crop():
     """Test cropping raw files
     """
@@ -679,8 +771,8 @@ def test_crop():
     for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
         raws[ri] = raw.crop(tmin, tmax, True)
     all_raw_2 = concatenate_raws(raws, preload=False)
-    assert_true(raw.first_samp == all_raw_2.first_samp)
-    assert_true(raw.last_samp == all_raw_2.last_samp)
+    assert_equal(raw.first_samp, all_raw_2.first_samp)
+    assert_equal(raw.last_samp, all_raw_2.last_samp)
     assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
 
     tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
@@ -698,32 +790,35 @@ def test_crop():
 
     all_raw_2 = raw.crop(0, None, True)
     for ar in [all_raw_1, all_raw_2]:
-        assert_true(raw.first_samp == ar.first_samp)
-        assert_true(raw.last_samp == ar.last_samp)
+        assert_equal(raw.first_samp, ar.first_samp)
+        assert_equal(raw.last_samp, ar.last_samp)
         assert_array_equal(raw[:, :][0], ar[:, :][0])
 
 
+@testing.requires_testing_data
 def test_resample():
     """Test resample (with I/O and multiple files)
     """
-    raw = Raw(fif_fname, preload=True).crop(0, 3, False)
+    tempdir = _TempDir()
+    raw = Raw(fif_fname).crop(0, 3, False)
+    raw.load_data()
     raw_resamp = raw.copy()
     sfreq = raw.info['sfreq']
     # test parallel on upsample
     raw_resamp.resample(sfreq * 2, n_jobs=2)
-    assert_true(raw_resamp.n_times == len(raw_resamp._times))
+    assert_equal(raw_resamp.n_times, len(raw_resamp.times))
     raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
     raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
-    assert_true(sfreq == raw_resamp.info['sfreq'] / 2)
-    assert_true(raw.n_times == raw_resamp.n_times / 2)
-    assert_true(raw_resamp._data.shape[1] == raw_resamp.n_times)
-    assert_true(raw._data.shape[0] == raw_resamp._data.shape[0])
+    assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
+    assert_equal(raw.n_times, raw_resamp.n_times / 2)
+    assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
+    assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
     # test non-parallel on downsample
     raw_resamp.resample(sfreq, n_jobs=1)
-    assert_true(raw_resamp.info['sfreq'] == sfreq)
-    assert_true(raw._data.shape == raw_resamp._data.shape)
-    assert_true(raw.first_samp == raw_resamp.first_samp)
-    assert_true(raw.last_samp == raw.last_samp)
+    assert_equal(raw_resamp.info['sfreq'], sfreq)
+    assert_equal(raw._data.shape, raw_resamp._data.shape)
+    assert_equal(raw.first_samp, raw_resamp.first_samp)
+    assert_equal(raw.last_samp, raw_resamp.last_samp)
     # upsampling then downsampling doubles resampling error, but this still
     # works (hooray). Note that the stim channels had to be sub-sampled
     # without filtering to be accurately preserved
@@ -742,9 +837,9 @@ def test_resample():
     raw3 = raw.copy()
     raw4 = raw.copy()
     raw1 = concatenate_raws([raw1, raw2])
-    raw1.resample(10)
-    raw3.resample(10)
-    raw4.resample(10)
+    raw1.resample(10.)
+    raw3.resample(10.)
+    raw4.resample(10.)
     raw3 = concatenate_raws([raw3, raw4])
     assert_array_equal(raw1._data, raw3._data)
     assert_array_equal(raw1._first_samps, raw3._first_samps)
@@ -754,7 +849,65 @@ def test_resample():
     assert_equal(raw1.last_samp, raw3.last_samp)
     assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
 
+    # test resampling of stim channel
+
+    # basic decimation
+    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(8.)._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation of multiple stim channels
+    raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
+    assert_allclose(raw.resample(8.)._data,
+                    [[1, 1, 0, 0, 1, 1, 0, 0],
+                     [1, 1, 0, 0, 1, 1, 0, 0]])
+
+    # decimation that could potentially drop events if the decimation is
+    # done naively
+    stim = [0, 0, 0, 1, 1, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    assert_allclose(raw.resample(4.)._data,
+                    [[0, 1, 1, 0]])
+
+    # two events are merged in this case (warning)
+    stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        raw.resample(8.)
+        assert_true(len(w) == 1)
 
+    # events are dropped in this case (warning)
+    stim = [0, 1, 1, 0, 0, 1, 1, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        raw.resample(4.)
+        assert_true(len(w) == 1)
+
+    # test resampling events: this should no longer give a warning
+    stim = [0, 1, 1, 0, 0, 1, 1, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    events = find_events(raw)
+    raw, events = raw.resample(4., events=events)
+    assert_equal(events, np.array([[0, 0, 1], [2, 0, 1]]))
+
+    # test copy flag
+    stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
+    raw = RawArray([stim], create_info(1, len(stim), ['stim']))
+    raw_resampled = raw.resample(4., copy=True)
+    assert_true(raw_resampled is not raw)
+    raw_resampled = raw.resample(4., copy=False)
+    assert_true(raw_resampled is raw)
+
+    # resample should still work even when no stim channel is present
+    raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
+    raw.resample(10)
+    assert_true(len(raw) == 10)
+
+
+@testing.requires_testing_data
 def test_hilbert():
     """Test computation of analytic signal using hilbert
     """
@@ -762,14 +915,29 @@ def test_hilbert():
     picks_meg = pick_types(raw.info, meg=True, exclude='bads')
     picks = picks_meg[:4]
 
+    raw_filt = raw.copy()
+    raw_filt.filter(10, 20)
+    raw_filt_2 = raw_filt.copy()
+
     raw2 = raw.copy()
+    raw3 = raw.copy()
     raw.apply_hilbert(picks)
     raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
 
+    # Test custom n_fft
+    raw_filt.apply_hilbert(picks)
+    raw_filt_2.apply_hilbert(picks, n_fft=raw_filt_2.n_times + 1000)
+    assert_equal(raw_filt._data.shape, raw_filt_2._data.shape)
+    assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
+                    atol=1e-13, rtol=1e-2)
+    assert_raises(ValueError, raw3.apply_hilbert, picks,
+                  n_fft=raw3.n_times - 100)
+
     env = np.abs(raw._data[picks, :])
     assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
 
 
+@testing.requires_testing_data
 def test_raw_copy():
     """Test Raw copy
     """
@@ -790,53 +958,28 @@ def test_raw_copy():
                  sorted(copied.__dict__.keys()))
 
 
-@requires_nitime
-def test_raw_to_nitime():
-    """ Test nitime export """
-    raw = Raw(fif_fname, preload=True)
-    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
-    picks = picks_meg[:4]
-    raw_ts = raw.to_nitime(picks=picks)
-    assert_true(raw_ts.data.shape[0] == len(picks))
-
-    raw = Raw(fif_fname, preload=False)
-    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
-    picks = picks_meg[:4]
-    raw_ts = raw.to_nitime(picks=picks)
-    assert_true(raw_ts.data.shape[0] == len(picks))
-
-    raw = Raw(fif_fname, preload=True)
-    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
-    picks = picks_meg[:4]
-    raw_ts = raw.to_nitime(picks=picks, copy=False)
-    assert_true(raw_ts.data.shape[0] == len(picks))
-
-    raw = Raw(fif_fname, preload=False)
-    picks_meg = pick_types(raw.info, meg=True, exclude='bads')
-    picks = picks_meg[:4]
-    raw_ts = raw.to_nitime(picks=picks, copy=False)
-    assert_true(raw_ts.data.shape[0] == len(picks))
-
-
 @requires_pandas
-def test_as_data_frame():
+def test_to_data_frame():
     """Test raw Pandas exporter"""
-    raw = Raw(fif_fname, preload=True)
-    df = raw.as_data_frame()
+    raw = Raw(test_fif_fname, preload=True)
+    _, times = raw[0, :10]
+    df = raw.to_data_frame()
     assert_true((df.columns == raw.ch_names).all())
-    df = raw.as_data_frame(use_time_index=False)
-    assert_true('time' in df.columns)
-    assert_array_equal(df.values[:, 1], raw._data[0] * 1e13)
-    assert_array_equal(df.values[:, 3], raw._data[2] * 1e15)
+    assert_array_equal(np.round(times * 1e3), df.index.values[:10])
+    df = raw.to_data_frame(index=None)
+    assert_true('time' in df.index.names)
+    assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
+    assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
 
 
+@testing.requires_testing_data
 def test_raw_index_as_time():
     """ Test index as time conversion"""
     raw = Raw(fif_fname, preload=True)
     t0 = raw.index_as_time([0], True)[0]
     t1 = raw.index_as_time([100], False)[0]
     t2 = raw.index_as_time([100], True)[0]
-    assert_true((t2 - t1) == t0)
+    assert_equal(t2 - t1, t0)
     # ensure we can go back and forth
     t3 = raw.index_as_time(raw.time_as_index([0], True), True)
     assert_array_almost_equal(t3, [0.0], 2)
@@ -845,23 +988,57 @@ def test_raw_index_as_time():
     t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
     assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
     i0 = raw.time_as_index(raw.index_as_time([0], True), True)
-    assert_true(i0[0] == 0)
+    assert_equal(i0[0], 0)
     i1 = raw.time_as_index(raw.index_as_time([100], True), True)
-    assert_true(i1[0] == 100)
+    assert_equal(i1[0], 100)
     # Have to add small amount of time because we truncate via int casting
     i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
-    assert_true(i1[0] == 100)
+    assert_equal(i1[0], 100)
 
 
+def test_add_channels():
+    """Test raw splitting / re-appending channel types
+    """
+    raw = Raw(test_fif_fname).crop(0, 1).load_data()
+    raw_nopre = Raw(test_fif_fname, preload=False)
+    raw_eeg_meg = raw.pick_types(meg=True, eeg=True, copy=True)
+    raw_eeg = raw.pick_types(meg=False, eeg=True, copy=True)
+    raw_meg = raw.pick_types(meg=True, eeg=False, copy=True)
+    raw_stim = raw.pick_types(meg=False, eeg=False, stim=True, copy=True)
+    raw_new = raw_meg.add_channels([raw_eeg, raw_stim], copy=True)
+    assert_true(all(ch in raw_new.ch_names
+                    for ch in raw_stim.ch_names + raw_meg.ch_names))
+    raw_new = raw_meg.add_channels([raw_eeg], copy=True)
+
+    assert_true(all(ch in raw_new.ch_names for ch in raw.ch_names))
+    assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
+    assert_array_equal(raw_new[:, :][1], raw[:, :][1])
+    assert_true(all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
+
+    # Now test errors
+    raw_badsf = raw_eeg.copy()
+    raw_badsf.info['sfreq'] = 3.1415927
+    raw_eeg = raw_eeg.crop(.5)
+
+    assert_raises(AssertionError, raw_meg.add_channels, [raw_nopre])
+    assert_raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
+    assert_raises(AssertionError, raw_meg.add_channels, [raw_eeg])
+    assert_raises(ValueError, raw_meg.add_channels, [raw_meg])
+    assert_raises(AssertionError, raw_meg.add_channels, raw_badsf)
+
+
+@testing.requires_testing_data
 def test_raw_time_as_index():
     """ Test time as index conversion"""
     raw = Raw(fif_fname, preload=True)
     first_samp = raw.time_as_index([0], True)[0]
-    assert_true(raw.first_samp == -first_samp)
+    assert_equal(raw.first_samp, -first_samp)
 
 
+@testing.requires_testing_data
 def test_save():
     """ Test saving raw"""
+    tempdir = _TempDir()
     raw = Raw(fif_fname, preload=False)
     # can't write over file being read
     assert_raises(ValueError, raw.save, fif_fname)
@@ -880,6 +1057,7 @@ def test_save():
     os.remove(new_fname)
 
 
+@testing.requires_testing_data
 def test_with_statement():
     """ Test with statement """
     for preload in [True, False]:
@@ -890,6 +1068,7 @@ def test_with_statement():
 def test_compensation_raw():
     """Test Raw compensation
     """
+    tempdir = _TempDir()
     raw1 = Raw(ctf_comp_fname, compensation=None)
     assert_true(raw1.comp is None)
     data1, times1 = raw1[:, :]
@@ -929,6 +1108,8 @@ def test_compensation_raw():
 def test_compensation_raw_mne():
     """Test Raw compensation by comparing with MNE
     """
+    tempdir = _TempDir()
+
     def compensate_mne(fname, grad):
         tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
         cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
@@ -942,38 +1123,7 @@ def test_compensation_raw_mne():
         assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
 
 
-def test_set_eeg_reference():
-    """ Test rereference eeg data"""
-    raw = Raw(fif_fname, preload=True)
-
-    # Rereference raw data by creating a copy of original data
-    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
-
-    # Separate EEG channels from other channel types
-    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
-    picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
-                             stim=True, exclude='bads')
-
-    # Get the raw EEG data and other channel data
-    raw_eeg_data = raw[picks_eeg][0]
-    raw_other_data = raw[picks_other][0]
-
-    # Get the rereferenced EEG data and channel other
-    reref_eeg_data = reref[picks_eeg][0]
-    unref_eeg_data = reref_eeg_data + ref_data
-    # Undo rereferencing of EEG channels
-    reref_other_data = reref[picks_other][0]
-
-    # Check that both EEG data and other data is the same
-    assert_array_equal(raw_eeg_data, unref_eeg_data)
-    assert_array_equal(raw_other_data, reref_other_data)
-
-    # Test that data is modified in place when copy=False
-    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
-                                        copy=False)
-    assert_true(raw is reref)
-
-
+@testing.requires_testing_data
 def test_drop_channels_mixin():
     """Test channels-dropping functionality
     """
@@ -989,10 +1139,11 @@ def test_drop_channels_mixin():
 
     raw.drop_channels(drop_ch)
     assert_equal(ch_names, raw.ch_names)
-    assert_equal(len(ch_names), len(raw.cals))
+    assert_equal(len(ch_names), len(raw._cals))
     assert_equal(len(ch_names), raw._data.shape[0])
 
 
+@testing.requires_testing_data
 def test_pick_channels_mixin():
     """Test channel-picking functionality
     """
@@ -1009,14 +1160,16 @@ def test_pick_channels_mixin():
 
     raw.pick_channels(ch_names, copy=False)  # copy is False
     assert_equal(ch_names, raw.ch_names)
-    assert_equal(len(ch_names), len(raw.cals))
+    assert_equal(len(ch_names), len(raw._cals))
     assert_equal(len(ch_names), raw._data.shape[0])
+    assert_raises(ValueError, raw.pick_channels, ch_names[0])
 
     raw = Raw(fif_fname, preload=False)
     assert_raises(RuntimeError, raw.pick_channels, ch_names)
     assert_raises(RuntimeError, raw.drop_channels, ch_names)
 
 
+@testing.requires_testing_data
 def test_equalize_channels():
     """Test equalization of channels
     """
@@ -1030,3 +1183,6 @@ def test_equalize_channels():
     equalize_channels(my_comparison)
     for e in my_comparison:
         assert_equal(ch_names, e.ch_names)
+
+
+run_tests_if_main()
diff --git a/mne/io/kit/__init__.py b/mne/io/kit/__init__.py
index 6bb193a..a3d74cc 100644
--- a/mne/io/kit/__init__.py
+++ b/mne/io/kit/__init__.py
@@ -1,12 +1,8 @@
 """KIT module for conversion to FIF"""
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
-from ...coreg import read_elp  # for backwards compatibility
-from .kit import read_raw_kit
-from .coreg import read_hsp, read_mrk, write_hsp, write_mrk
-from . import kit
-from . import coreg
-from . import constants
+from .kit import read_raw_kit, read_epochs_kit
+from .coreg import read_mrk
diff --git a/mne/io/kit/constants.py b/mne/io/kit/constants.py
index b4d3eae..7941223 100644
--- a/mne/io/kit/constants.py
+++ b/mne/io/kit/constants.py
@@ -1,6 +1,6 @@
 """KIT constants"""
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -19,7 +19,8 @@ KIT.STRING = 128
 KIT.AMPLIFIER_INFO = 112
 KIT.BASIC_INFO = 16
 KIT.CHAN_SENS = 80
-KIT.DATA_OFFSET = 144
+KIT.RAW_OFFSET = 144
+KIT.AVE_OFFSET = 160
 KIT.SAMPLE_INFO = 128
 KIT.MRK_INFO = 192
 KIT.CHAN_LOC_OFFSET = 64
@@ -32,8 +33,8 @@ KIT.UNIT_MUL = 0  # default is 0 mne_manual p.273
 
 # gain: 0:x1, 1:x2, 2:x5, 3:x10, 4:x20, 5:x50, 6:x100, 7:x200
 KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
-# BEF options: 0:THRU, 1:50Hz, 2:60Hz
-KIT.BEFS = [0, 50, 60]
+# BEF options: 0:THRU, 1:50Hz, 2:60Hz, 3:50Hz
+KIT.BEFS = [0, 50, 60, 50]
 
 # coreg constants
 KIT.DIG_POINTS = 10000
@@ -87,7 +88,7 @@ KIT_AD.GAIN3_BIT = 24  # stored in Bit 24-26
 KIT_AD.GAIN3_MASK = 2 ** 24 + 2 ** 25 + 2 ** 26
 KIT_AD.HPF_BIT = 8  # stored in Bit 8-10
 KIT_AD.HPF_MASK = 2 ** 8 + 2 ** 9 + 2 ** 10
-KIT_AD.LPF_BIT = 18  # stored in Bit 16-18
+KIT_AD.LPF_BIT = 16  # stored in Bit 16-18
 KIT_AD.LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
 KIT_AD.BEF_BIT = 0  # stored in Bit 0-1
 KIT_AD.BEF_MASK = 2 ** 0 + 2 ** 1
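
The `LPF_BIT` fix above matters because the decoder shifts by the lowest bit of the field, not the highest. A minimal sketch of the mask-and-shift pattern used when unpacking the amplifier word (the value here is made up)::

    amp_data = 0b101 << 16                 # pretend LPF field in bits 16-18
    LPF_BIT = 16
    LPF_MASK = 2 ** 16 + 2 ** 17 + 2 ** 18
    lpf_index = (LPF_MASK & amp_data) >> LPF_BIT   # -> 5
    # lpf_index then indexes the system's LPFS lookup table; shifting by
    # 18 instead of 16 would have silently produced a wrong index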
diff --git a/mne/io/kit/coreg.py b/mne/io/kit/coreg.py
index a9075c1..48b56cd 100644
--- a/mne/io/kit/coreg.py
+++ b/mne/io/kit/coreg.py
@@ -1,21 +1,16 @@
 """Coordinate Point Extractor for KIT system"""
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
-from datetime import datetime
-from ...externals.six.moves import cPickle as pickle
-import os
-from os import SEEK_CUR
+from os import SEEK_CUR, path as op
 import re
 from struct import unpack
-
 import numpy as np
-
-from ... import __version__
 from .constants import KIT
-from ...externals.six import b
+from ..meas_info import _read_dig_points
+from ...externals.six.moves import cPickle as pickle
 
 
 def read_mrk(fname):
@@ -25,14 +20,14 @@ def read_mrk(fname):
     ----------
     fname : str
         Absolute path to Marker file.
-        File formats allowed: *.sqd, *.mrk, *.txt, *.pickled
+        File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.
 
     Returns
     -------
     mrk_points : numpy.array, shape = (n_points, 3)
         Marker points in MEG space [m].
     """
-    ext = os.path.splitext(fname)[-1]
+    ext = op.splitext(fname)[-1]
     if ext in ('.sqd', '.mrk'):
         with open(fname, 'rb', buffering=0) as fid:
             fid.seek(KIT.MRK_INFO)
@@ -48,7 +43,7 @@ def read_mrk(fname):
                 pts.append(np.fromfile(fid, dtype='d', count=3))
                 mrk_points = np.array(pts)
     elif ext == '.txt':
-        mrk_points = np.loadtxt(fname)
+        mrk_points = _read_dig_points(fname)
     elif ext == '.pickled':
         with open(fname, 'rb') as fid:
             food = pickle.load(fid)
@@ -71,81 +66,6 @@ def read_mrk(fname):
     return mrk_points
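
A short usage sketch for `read_mrk`, including the list-averaging pattern that `RawKIT` applies when several marker measurements are given; the file names are hypothetical::

    import numpy as np
    from mne.io.kit import read_mrk

    pre = read_mrk('mrk_pre.sqd')      # shape (n_points, 3), in meters
    post = read_mrk('mrk_post.sqd')
    mrk = np.mean([pre, post], axis=0)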
 
 
-def write_mrk(fname, points):
-    """Save KIT marker coordinates
-
-    Parameters
-    ----------
-    fname : str
-        Path to the file to write. The kind of file to write is determined
-        based on the extension: '.txt' for tab separated text file, '.pickled'
-        for pickled file.
-    points : array_like, shape = (5, 3)
-        The marker point coordinates.
-    """
-    mrk = np.asarray(points)
-    _, ext = os.path.splitext(fname)
-    if mrk.shape != (5, 3):
-        err = ("KIT marker points array needs to have shape (5, 3), got "
-               "%s." % str(mrk.shape))
-        raise ValueError(err)
-
-    if ext == '.pickled':
-        with open(fname, 'wb') as fid:
-            pickle.dump({'mrk': mrk}, fid, pickle.HIGHEST_PROTOCOL)
-    elif ext == '.txt':
-        np.savetxt(fname, mrk, fmt='%.18e', delimiter='\t', newline='\n')
-    else:
-        err = "Unrecognized extension: %r. Need '.txt' or '.pickled'." % ext
-        raise ValueError(err)
-
-
-def read_hsp(fname):
-    """Read a Polhemus ascii head shape file
-
-    Parameters
-    ----------
-    fname : str
-        Path to head shape file acquired from Polhemus system and saved in
-        ascii format.
-
-    Returns
-    -------
-    hsp_points : numpy.array, shape = (n_points, 3)
-        Headshape points in Polhemus head space.
-        File formats allowed: *.txt, *.pickled
-    """
-    pattern = re.compile(r'(\-?\d+\.\d+)\s+(\-?\d+\.\d+)\s+(\-?\d+\.\d+)')
-    with open(fname) as fid:
-        hsp_points = pattern.findall(fid.read())
-    hsp_points = np.array(hsp_points, dtype=float)
-    return hsp_points
-
-
-def write_hsp(fname, pts):
-    """Write a headshape hsp file
-
-    Parameters
-    ----------
-    fname : str
-        Target file.
-    pts : array, shape = (n_pts, 3)
-        Points comprising the headshape.
-    """
-    pts = np.asarray(pts)
-    if (pts.ndim != 2) or (pts.shape[1] != 3):
-        err = "pts must be of shape (n_pts, 3), not %r" % str(pts.shape)
-        raise ValueError(err)
-
-    with open(fname, 'wb') as fid:
-        version = __version__
-        now = datetime.now().strftime("%I:%M%p on %B %d, %Y")
-        fid.write(b("% Ascii 3D points file created by mne-python version "
-                    "{version} at {now}\n".format(version=version, now=now)))
-        fid.write(b("% {N} 3D points, x y z per line\n".format(N=len(pts))))
-        np.savetxt(fid, pts, '%8.2f', ' ')
-
-
 def read_sns(fname):
     """Sensor coordinate extraction in MEG space
 
diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py
index 5d7a5ce..df0eb35 100644
--- a/mne/io/kit/kit.py
+++ b/mne/io/kit/kit.py
@@ -4,12 +4,11 @@ RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
 
 """
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
-import os
-from os import SEEK_CUR
+from os import SEEK_CUR, path as op
 from struct import unpack
 import time
 
@@ -17,17 +16,18 @@ import numpy as np
 from scipy import linalg
 
 from ..pick import pick_types
-from ...coreg import (read_elp, fit_matched_points, _decimate_points,
-                      get_ras_to_neuromag_trans)
+from ...coreg import fit_matched_points, _decimate_points
 from ...utils import verbose, logger
-from ...transforms import apply_trans, als_ras_trans, als_ras_trans_mm
+from ...transforms import (apply_trans, als_ras_trans, als_ras_trans_mm,
+                           get_ras_to_neuromag_trans, Transform)
 from ..base import _BaseRaw
+from ...epochs import _BaseEpochs
 from ..constants import FIFF
-from ..meas_info import Info
-from ..tag import _loc_to_trans
+from ..meas_info import _empty_info, _read_dig_points, _make_dig_points
 from .constants import KIT, KIT_NY, KIT_AD
-from .coreg import read_hsp, read_mrk
+from .coreg import read_mrk
 from ...externals.six import string_types
+from ...event import read_events
 
 
 class RawKIT(_BaseRaw):
@@ -61,13 +61,23 @@ class RawKIT(_BaseRaw):
         is interpreted as an event.
     stimthresh : float
         The threshold level for accepting voltage changes in KIT trigger
-        channels as a trigger event.
-    preload : bool
-        If True, all data are loaded at initialization.
-        If False, data are not read until save.
+        channels as a trigger event. If None, stim must also be set to None.
+    preload : bool or str (default False)
+        Preload data into memory for data manipulation and faster indexing.
+        If True, the data will be preloaded into memory (fast, requires
+        large amount of memory). If preload is a string, preload is the
+        file name of a memory-mapped file which is used to store the data
+        on the hard drive (slower, requires less memory).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the headshape surface points. elp
+refers to the points in head-space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
     See Also
     --------
     mne.io.Raw : Documentation of attribute and methods.
@@ -76,177 +86,38 @@ class RawKIT(_BaseRaw):
     def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
                  slope='-', stimthresh=1, preload=False, verbose=None):
         logger.info('Extracting SQD Parameters from %s...' % input_fname)
-        input_fname = os.path.abspath(input_fname)
-        self._sqd_params = get_sqd_params(input_fname)
-        self._sqd_params['stimthresh'] = stimthresh
-        self._sqd_params['fname'] = input_fname
-        logger.info('Creating Raw.info structure...')
-
-        # Raw attributes
-        self.verbose = verbose
+        input_fname = op.abspath(input_fname)
         self.preload = False
-        self._projector = None
-        self.first_samp = 0
-        self.last_samp = self._sqd_params['nsamples'] - 1
-        self.comp = None  # no compensation for KIT
-        self.proj = False
-
-        # Create raw.info dict for raw fif object with SQD data
-        self.info = Info()
-        self.info['meas_id'] = None
-        self.info['file_id'] = None
-        self.info['meas_date'] = int(time.time())
-        self.info['projs'] = []
-        self.info['comps'] = []
-        self.info['lowpass'] = self._sqd_params['lowpass']
-        self.info['highpass'] = self._sqd_params['highpass']
-        self.info['sfreq'] = float(self._sqd_params['sfreq'])
-        # meg channels plus synthetic channel
-        self.info['nchan'] = self._sqd_params['nchan'] + 1
-        self.info['bads'] = []
-        self.info['acq_pars'], self.info['acq_stim'] = None, None
-        self.info['filename'] = None
-        self.info['ctf_head_t'] = None
-        self.info['dev_ctf_t'] = []
-        self._filenames = []
-        self.info['dig'] = None
-        self.info['dev_head_t'] = None
+        logger.info('Creating Raw.info structure...')
+        info, kit_info = get_kit_info(input_fname)
+        kit_info['slope'] = slope
+        kit_info['stimthresh'] = stimthresh
+        if kit_info['acq_type'] != 1:
+            err = 'SQD file contains epochs, not raw data. Wrong reader.'
+            raise TypeError(err)
+        logger.info('Creating Info structure...')
+
+        last_samps = [kit_info['n_samples'] - 1]
+        self._raw_extras = [kit_info]
+        self._set_stimchannels(info, stim)
+        super(RawKIT, self).__init__(
+            info, preload, last_samps=last_samps, filenames=[input_fname],
+            raw_extras=self._raw_extras, verbose=verbose)
 
         if isinstance(mrk, list):
             mrk = [read_mrk(marker) if isinstance(marker, string_types)
                    else marker for marker in mrk]
             mrk = np.mean(mrk, axis=0)
-
         if (mrk is not None and elp is not None and hsp is not None):
-            self._set_dig_kit(mrk, elp, hsp)
+            dig_points, dev_head_t = _set_dig_kit(mrk, elp, hsp)
+            self.info['dig'] = dig_points
+            self.info['dev_head_t'] = dev_head_t
         elif (mrk is not None or elp is not None or hsp is not None):
-            err = ("mrk, elp and hsp need to be provided as a group (all or "
-                   "none)")
-            raise ValueError(err)
-
-        # Creates a list of dicts of meg channels for raw.info
-        logger.info('Setting channel info structure...')
-        ch_names = {}
-        ch_names['MEG'] = ['MEG %03d' % ch for ch
-                           in range(1, self._sqd_params['n_sens'] + 1)]
-        ch_names['MISC'] = ['MISC %03d' % ch for ch
-                            in range(1, self._sqd_params['nmiscchan'] + 1)]
-        ch_names['STIM'] = ['STI 014']
-        locs = self._sqd_params['sensor_locs']
-        chan_locs = apply_trans(als_ras_trans, locs[:, :3])
-        chan_angles = locs[:, 3:]
-        self.info['chs'] = []
-        for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
-                                          chan_angles), 1):
-            ch_name, ch_loc, ch_angles = ch_info
-            chan_info = {}
-            chan_info['cal'] = KIT.CALIB_FACTOR
-            chan_info['logno'] = idx
-            chan_info['scanno'] = idx
-            chan_info['range'] = KIT.RANGE
-            chan_info['unit_mul'] = KIT.UNIT_MUL
-            chan_info['ch_name'] = ch_name
-            chan_info['unit'] = FIFF.FIFF_UNIT_T
-            chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
-            if idx <= self._sqd_params['nmegchan']:
-                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
-                chan_info['kind'] = FIFF.FIFFV_MEG_CH
-            else:
-                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
-                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
-            chan_info['eeg_loc'] = None
+            raise ValueError('mrk, elp and hsp need to be provided as a group '
+                             '(all or none)')
 
-            # create three orthogonal vector
-            # ch_angles[0]: theta, ch_angles[1]: phi
-            ch_angles = np.radians(ch_angles)
-            x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
-            y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
-            z = np.cos(ch_angles[0])
-            vec_z = np.array([x, y, z])
-            length = linalg.norm(vec_z)
-            vec_z /= length
-            vec_x = np.zeros(vec_z.size, dtype=np.float)
-            if vec_z[1] < vec_z[2]:
-                if vec_z[0] < vec_z[1]:
-                    vec_x[0] = 1.0
-                else:
-                    vec_x[1] = 1.0
-            elif vec_z[0] < vec_z[2]:
-                vec_x[0] = 1.0
-            else:
-                vec_x[2] = 1.0
-            vec_x -= np.sum(vec_x * vec_z) * vec_z
-            length = linalg.norm(vec_x)
-            vec_x /= length
-            vec_y = np.cross(vec_z, vec_x)
-            # transform to Neuromag like coordinate space
-            vecs = np.vstack((vec_x, vec_y, vec_z))
-            vecs = apply_trans(als_ras_trans, vecs)
-            chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
-            chan_info['coil_trans'] = _loc_to_trans(chan_info['loc'])
-            self.info['chs'].append(chan_info)
-
-        # label trigger and misc channels
-        for idy, ch_name in enumerate(ch_names['MISC'] + ch_names['STIM'],
-                                      self._sqd_params['n_sens']):
-            chan_info = {}
-            chan_info['cal'] = KIT.CALIB_FACTOR
-            chan_info['logno'] = idy
-            chan_info['scanno'] = idy
-            chan_info['range'] = 1.0
-            chan_info['unit'] = FIFF.FIFF_UNIT_V
-            chan_info['unit_mul'] = 0  # default is 0 mne_manual p.273
-            chan_info['ch_name'] = ch_name
-            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
-            chan_info['loc'] = np.zeros(12)
-            if ch_name.startswith('STI'):
-                chan_info['unit'] = FIFF.FIFF_UNIT_NONE
-                chan_info['kind'] = FIFF.FIFFV_STIM_CH
-            else:
-                chan_info['kind'] = FIFF.FIFFV_MISC_CH
-            self.info['chs'].append(chan_info)
-        self.info['ch_names'] = (ch_names['MEG'] + ch_names['MISC'] +
-                                 ch_names['STIM'])
-
-        self._set_stimchannels(stim, slope)
-        if preload:
-            self.preload = preload
-            logger.info('Reading raw data from %s...' % input_fname)
-            self._data, _ = self._read_segment()
-            assert len(self._data) == self.info['nchan']
-
-            # Create a synthetic channel
-            stim = self._sqd_params['stim']
-            trig_chs = self._data[stim, :]
-            if slope == '+':
-                trig_chs = trig_chs > stimthresh
-            elif slope == '-':
-                trig_chs = trig_chs < stimthresh
-            else:
-                raise ValueError("slope needs to be '+' or '-'")
-            trig_vals = np.array(2 ** np.arange(len(stim)), ndmin=2).T
-            trig_chs = trig_chs * trig_vals
-            stim_ch = trig_chs.sum(axis=0)
-            self._data[-1, :] = stim_ch
-
-            # Add time info
-            self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
-            self._times = np.arange(self.first_samp, self.last_samp + 1,
-                                    dtype=np.float64)
-            self._times /= self.info['sfreq']
-            logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs'
-                        % (self.first_samp, self.last_samp,
-                           float(self.first_samp) / self.info['sfreq'],
-                           float(self.last_samp) / self.info['sfreq']))
         logger.info('Ready.')
 
-    def __repr__(self):
-        s = ('%r' % os.path.basename(self._sqd_params['fname']),
-             "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
-                                                 self.last_samp -
-                                                 self.first_samp + 1))
-        return "<RawKIT  |  %s>" % ', '.join(s)
-
     def read_stim_ch(self, buffer_size=1e5):
         """Read events from data
 
@@ -269,29 +140,259 @@ class RawKIT(_BaseRaw):
         stim_ch = np.empty((1, stop), dtype=np.int)
         for b_start in range(start, stop, buffer_size):
             b_stop = b_start + buffer_size
-            x, _ = self._read_segment(start=b_start, stop=b_stop, sel=pick)
+            x = self[pick, b_start:b_stop][0]
             stim_ch[:, b_start:b_start + x.shape[1]] = x
 
         return stim_ch
 
-    def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
-                      projector=None):
-        """Read a chunk of raw data
+    def _set_stimchannels(self, info, stim='<'):
+        """Specify how the trigger channel is synthesized from analog channels.
+
+        Has to be done before loading data. For a RawKIT instance that has been
+        created with preload=True, this method will raise a
+        NotImplementedError.
 
         Parameters
         ----------
-        start : int, (optional)
-            first sample to include (first is 0). If omitted, defaults to the
-            first sample in data.
-        stop : int, (optional)
-            First sample to not include.
-            If omitted, data is included to the end.
-        sel : array, optional
-            Indices of channels to select.
-        projector : array
-            SSP operator to apply to the data.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
+        info : instance of MeasInfo
+            The measurement info.
+        stim : list of int | '<' | '>'
+            Can be submitted as list of trigger channels.
+            If a list is not specified, the default triggers extracted from
+            misc channels will be used with specified directionality.
+            '<' means that largest values assigned to the first channel
+            in sequence.
+            '>' means the largest trigger assigned to the last channel
+            in sequence.
+        """
+        if stim is not None:
+            if isinstance(stim, str):
+                picks = pick_types(info, meg=False, ref_meg=False,
+                                   misc=True, exclude=[])[:8]
+                if stim == '<':
+                    stim = picks[::-1]
+                elif stim == '>':
+                    stim = picks
+                else:
+                    raise ValueError("stim needs to be list of int, '>' or "
+                                     "'<', not %r" % str(stim))
+            elif np.max(stim) >= self._raw_extras[0]['nchan']:
+                raise ValueError('Tried to set stim channel %i, but sqd file '
+                                 'only has %i channels'
+                                 % (np.max(stim),
+                                    self._raw_extras[0]['nchan']))
+            # modify info
+            info['nchan'] = self._raw_extras[0]['nchan'] + 1
+            ch_name = 'STI 014'
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = info['nchan']
+            chan_info['scanno'] = info['nchan']
+            chan_info['range'] = 1.0
+            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
+            chan_info['unit_mul'] = 0
+            chan_info['ch_name'] = ch_name
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['loc'] = np.zeros(12)
+            chan_info['kind'] = FIFF.FIFFV_STIM_CH
+            info['chs'].append(chan_info)
+            info['ch_names'].append(ch_name)
+        if self.preload:
+            err = "Can't change stim channel after preloading data"
+            raise NotImplementedError(err)
+
+        self._raw_extras[0]['stim'] = stim
+
+    @verbose
+    def _read_segment_file(self, data, idx, offset, fi, start, stop,
+                           cals, mult):
+        """Read a chunk of raw data"""
+        # cals are all unity, so can be ignored
+
+        # RawFIF and RawEDF think of "stop" differently, easiest to increment
+        # here and refactor later
+        stop += 1
+        with open(self._filenames[fi], 'rb', buffering=0) as fid:
+            # extract data
+            data_offset = KIT.RAW_OFFSET
+            fid.seek(data_offset)
+            # data offset info
+            data_offset = unpack('i', fid.read(KIT.INT))[0]
+            nchan = self._raw_extras[fi]['nchan']
+            buffer_size = stop - start
+            count = buffer_size * nchan
+            pointer = start * nchan * KIT.SHORT
+            fid.seek(data_offset + pointer)
+            data_ = np.fromfile(fid, dtype='h', count=count)
+
+        # amplifier applies only to the sensor channels
+        data_.shape = (buffer_size, nchan)
+        n_sens = self._raw_extras[fi]['n_sens']
+        sensor_gain = self._raw_extras[fi]['sensor_gain'].copy()
+        sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
+                                self._raw_extras[fi]['amp_gain'])
+        conv_factor = np.array((KIT.VOLTAGE_RANGE /
+                                self._raw_extras[fi]['DYNAMIC_RANGE']) *
+                               sensor_gain)
+        data_ = conv_factor[:, np.newaxis] * data_.T
+
+        # Create a synthetic channel
+        if self._raw_extras[fi]['stim'] is not None:
+            trig_chs = data_[self._raw_extras[fi]['stim'], :]
+            if self._raw_extras[fi]['slope'] == '+':
+                trig_chs = trig_chs > self._raw_extras[0]['stimthresh']
+            elif self._raw_extras[fi]['slope'] == '-':
+                trig_chs = trig_chs < self._raw_extras[0]['stimthresh']
+            else:
+                raise ValueError("slope needs to be '+' or '-'")
+            trig_vals = np.array(
+                2 ** np.arange(len(self._raw_extras[0]['stim'])), ndmin=2).T
+            trig_chs = trig_chs * trig_vals
+            stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
+            data_ = np.vstack((data_, stim_ch))
+        data[:, offset:offset + (stop - start)] = \
+            np.dot(mult, data_) if mult is not None else data_[idx]
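
To make the trigger synthesis above concrete, a standalone sketch of how thresholded analog channels are binary-coded into one synthetic stim channel (the data are made up)::

    import numpy as np

    trig = np.array([[0., 5., 0.],   # channel for bit 0 (value 1)
                     [0., 5., 5.],   # channel for bit 1 (value 2)
                     [0., 0., 5.]])  # channel for bit 2 (value 4)
    on = trig > 1.0                              # slope '+', stimthresh=1
    trig_vals = 2 ** np.arange(len(trig))[:, np.newaxis]
    stim_ch = (on * trig_vals).sum(axis=0)       # -> [0, 3, 6]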
+
+
+class EpochsKIT(_BaseEpochs):
+    """Epochs Array object from KIT SQD file
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    events : str | array, shape (n_events, 3)
+        Path to events file. If array, it is the events typically returned
+        by the read_events function. If some events don't match the events
+        of interest as specified by event_id, they will be marked as 'IGNORED'
+        in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and
+        a dict is created with string integer names corresponding
+        to the event id integers.
+    tmin : float
+        Start time before event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels)
+                          )
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
+        If flat is None then no rejection is done.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs (with the default None,
+        the window will start with tmin).
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs (with the default None,
+        the window will end with tmax).
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
+    Polhemus FastScan system. hsp refers to the headshape surface points. elp
+    refers to the points in head-space that correspond to the HPI points.
+    Currently, '*.elp' and '*.hsp' files are NOT supported.
+
+    See Also
+    --------
+    mne.Epochs : Documentation of attribute and methods.
+    """
+    @verbose
+    def __init__(self, input_fname, events, event_id=None, tmin=0,
+                 baseline=None,  reject=None, flat=None, reject_tmin=None,
+                 reject_tmax=None, mrk=None, elp=None, hsp=None, verbose=None):
+
+        if isinstance(events, string_types):
+            events = read_events(events)
+        if isinstance(mrk, list):
+            mrk = [read_mrk(marker) if isinstance(marker, string_types)
+                   else marker for marker in mrk]
+            mrk = np.mean(mrk, axis=0)
+
+        logger.info('Extracting KIT Parameters from %s...' % input_fname)
+        input_fname = op.abspath(input_fname)
+        self.info, kit_info = get_kit_info(input_fname)
+        self._raw_extras = [kit_info]
+
+        # dig points can only be set once self.info exists
+        if (mrk is not None and elp is not None and hsp is not None):
+            dig_points, dev_head_t = _set_dig_kit(mrk, elp, hsp)
+            self.info['dig'] = dig_points
+            self.info['dev_head_t'] = dev_head_t
+        elif (mrk is not None or elp is not None or hsp is not None):
+            err = ("mrk, elp and hsp need to be provided as a group (all or "
+                   "none)")
+            raise ValueError(err)
+        if len(events) != self._raw_extras[0]['n_epochs']:
+            raise ValueError('Event list does not match number of epochs.')
+
+        if self._raw_extras[0]['acq_type'] == 3:
+            self._raw_extras[0]['data_offset'] = KIT.RAW_OFFSET
+            self._raw_extras[0]['data_length'] = KIT.INT
+            self._raw_extras[0]['dtype'] = 'h'
+        else:
+            err = ('SQD file contains raw data, not epochs or average. '
+                   'Wrong reader.')
+            raise TypeError(err)
+
+        if event_id is None:  # convert to int to make typing-checks happy
+            event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
+
+        for key, val in event_id.items():
+            if val not in events[:, 2]:
+                raise ValueError('No matching events found for %s '
+                                 '(event id %i)' % (key, val))
+
+        self._filename = input_fname
+        data = self._read_kit_data()
+        assert data.shape == (self._raw_extras[0]['n_epochs'],
+                              self.info['nchan'],
+                              self._raw_extras[0]['frame_length'])
+        tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
+        super(EpochsKIT, self).__init__(self.info, data, events, event_id,
+                                        tmin, tmax, baseline,
+                                        reject=reject, flat=flat,
+                                        reject_tmin=reject_tmin,
+                                        reject_tmax=reject_tmax,
+                                        verbose=verbose)
+        logger.info('Ready.')
+
+    def _read_kit_data(self):
+        """Read epochs data
 
         Returns
         -------
@@ -300,237 +401,130 @@ class RawKIT(_BaseRaw):
         times : array, [samples]
             returns the time values corresponding to the samples.
         """
-        if sel is None:
-            sel = list(range(self.info['nchan']))
-        elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
-            return (666, 666)
-        if projector is not None:
-            raise NotImplementedError('Currently does not handle projections.')
-        if stop is None:
-            stop = self.last_samp + 1
-        elif stop > self.last_samp + 1:
-            stop = self.last_samp + 1
-
         #  Initial checks
-        start = int(start)
-        stop = int(stop)
-
-        if start >= stop:
-            raise ValueError('No data in this range')
+        epoch_length = self._raw_extras[0]['frame_length']
+        n_epochs = self._raw_extras[0]['n_epochs']
+        n_samples = self._raw_extras[0]['n_samples']
 
-        logger.info('Reading %d ... %d  =  %9.3f ... %9.3f secs...' %
-                    (start, stop - 1, start / float(self.info['sfreq']),
-                     (stop - 1) / float(self.info['sfreq'])))
-
-        with open(self._sqd_params['fname'], 'rb', buffering=0) as fid:
+        with open(self._filename, 'rb', buffering=0) as fid:
             # extract data
-            fid.seek(KIT.DATA_OFFSET)
+            data_offset = self._raw_extras[0]['data_offset']
+            dtype = self._raw_extras[0]['dtype']
+            fid.seek(data_offset)
             # data offset info
             data_offset = unpack('i', fid.read(KIT.INT))[0]
-            nchan = self._sqd_params['nchan']
-            buffer_size = stop - start
-            count = buffer_size * nchan
-            pointer = start * nchan * KIT.SHORT
-            fid.seek(data_offset + pointer)
-            data = np.fromfile(fid, dtype='h', count=count)
-            data = data.reshape((buffer_size, nchan))
+            nchan = self._raw_extras[0]['nchan']
+            count = n_samples * nchan
+            fid.seek(data_offset)
+            data = np.fromfile(fid, dtype=dtype, count=count)
+            data = data.reshape((n_samples, nchan))
         # amplifier applies only to the sensor channels
-        n_sens = self._sqd_params['n_sens']
-        sensor_gain = np.copy(self._sqd_params['sensor_gain'])
+        n_sens = self._raw_extras[0]['n_sens']
+        sensor_gain = np.copy(self._raw_extras[0]['sensor_gain'])
         sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
-                                self._sqd_params['amp_gain'])
+                                self._raw_extras[0]['amp_gain'])
         conv_factor = np.array((KIT.VOLTAGE_RANGE /
-                                self._sqd_params['DYNAMIC_RANGE'])
-                               * sensor_gain, ndmin=2)
+                                self._raw_extras[0]['DYNAMIC_RANGE']) *
+                               sensor_gain, ndmin=2)
         data = conv_factor * data
+        # reshape
         data = data.T
-        # Create a synthetic channel
-        trig_chs = data[self._sqd_params['stim'], :]
-        if self._sqd_params['slope'] == '+':
-            trig_chs = trig_chs > self._sqd_params['stimthresh']
-        elif self._sqd_params['slope'] == '-':
-            trig_chs = trig_chs < self._sqd_params['stimthresh']
-        else:
-            raise ValueError("slope needs to be '+' or '-'")
-        trig_vals = np.array(2 ** np.arange(len(self._sqd_params['stim'])),
-                             ndmin=2).T
-        trig_chs = trig_chs * trig_vals
-        stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
-        data = np.vstack((data, stim_ch))
-        data = data[sel]
+        data = data.reshape((nchan, n_epochs, epoch_length))
+        data = data.transpose((1, 0, 2))
 
-        logger.info('[done]')
-        times = np.arange(start, stop) / self.info['sfreq']
+        return data
 
-        return data, times
 
-    def _set_dig_kit(self, mrk, elp, hsp, auto_decimate=True):
-        """Add landmark points and head shape data to the RawKIT instance
+def _set_dig_kit(mrk, elp, hsp, auto_decimate=True):
+    """Add landmark points and head shape data to the KIT instance
 
-        Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
-        ALS coordinate system.
+    Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
+    ALS coordinate system.
 
-        Parameters
-        ----------
-        mrk : None | str | array_like, shape = (5, 3)
-            Marker points representing the location of the marker coils with
-            respect to the MEG Sensors, or path to a marker file.
-        elp : None | str | array_like, shape = (8, 3)
-            Digitizer points representing the location of the fiducials and the
-            marker coils with respect to the digitized head shape, or path to a
-            file containing these points.
-        hsp : None | str | array, shape = (n_points, 3)
-            Digitizer head shape points, or path to head shape file. If more
-            than 10`000 points are in the head shape, they are automatically
-            decimated.
-        auto_decimate : bool
-            Decimate hsp points for head shape files with more than 10'000
-            points.
-        """
-        if isinstance(hsp, string_types):
-            hsp = read_hsp(hsp)
-
-        n_pts = len(hsp)
-        if n_pts > KIT.DIG_POINTS:
-            hsp = _decimate_points(hsp, 5)
-            n_new = len(hsp)
-            msg = ("The selected head shape contained {n_in} points, which is "
-                   "more than recommended ({n_rec}), and was automatically "
-                   "downsampled to {n_new} points. The preferred way to "
-                   "downsample is using FastScan.")
-            msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
-            logger.warning(msg)
-
-        if isinstance(elp, string_types):
-            elp_points = read_elp(elp)[:8]
-            if len(elp) < 8:
-                err = ("File %r contains fewer than 8 points; got shape "
-                       "%s." % (elp, elp_points.shape))
-                raise ValueError(err)
-            elp = elp_points
-
-        if isinstance(mrk, string_types):
-            mrk = read_mrk(mrk)
-
-        hsp = apply_trans(als_ras_trans_mm, hsp)
-        elp = apply_trans(als_ras_trans_mm, elp)
-        mrk = apply_trans(als_ras_trans, mrk)
-
-        nasion, lpa, rpa = elp[:3]
-        nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
-        elp = apply_trans(nmtrans, elp)
-        hsp = apply_trans(nmtrans, hsp)
-
-        # device head transform
-        trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
-
-        self._set_dig_neuromag(elp[:3], elp[3:], hsp, trans)
-
-    def _set_dig_neuromag(self, fid, elp, hsp, trans):
-        """Fill in the digitizer data using points in neuromag space
+    Parameters
+    ----------
+    mrk : None | str | array_like, shape = (5, 3)
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more
+        than 10,000 points are in the head shape, they are automatically
+        decimated.
+    auto_decimate : bool
+        Decimate hsp points for head shape files with more than 10,000
+        points.
 
-        Parameters
-        ----------
-        fid : array, shape = (3, 3)
-            Digitizer fiducials.
-        elp : array, shape = (5, 3)
-            Digitizer ELP points.
-        hsp : array, shape = (n_points, 3)
-            Head shape points.
-        trans : None | array, shape = (4, 4)
-            Device head transformation.
-        """
-        trans = np.asarray(trans)
-        if fid.shape != (3, 3):
-            raise ValueError("fid needs to be a 3 by 3 array")
-        if elp.shape != (5, 3):
-            raise ValueError("elp needs to be a 5 by 3 array")
-        if trans.shape != (4, 4):
-            raise ValueError("trans needs to be 4 by 4 array")
-
-        nasion, lpa, rpa = fid
-        dig = [{'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
-                'kind': FIFF.FIFFV_POINT_CARDINAL,
-                'coord_frame':  FIFF.FIFFV_COORD_HEAD},
-               {'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
-                'kind': FIFF.FIFFV_POINT_CARDINAL,
-                'coord_frame': FIFF.FIFFV_COORD_HEAD},
-               {'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
-                'kind': FIFF.FIFFV_POINT_CARDINAL,
-                'coord_frame': FIFF.FIFFV_COORD_HEAD}]
-
-        for idx, point in enumerate(elp):
-            dig.append({'r': point, 'ident': idx, 'kind': FIFF.FIFFV_POINT_HPI,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
-
-        for idx, point in enumerate(hsp):
-            dig.append({'r': point, 'ident': idx,
-                        'kind': FIFF.FIFFV_POINT_EXTRA,
-                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
-
-        dev_head_t = {'from': FIFF.FIFFV_COORD_DEVICE,
-                      'to': FIFF.FIFFV_COORD_HEAD, 'trans': trans}
-
-        self.info['dig'] = dig
-        self.info['dev_head_t'] = dev_head_t
-
-    def _set_stimchannels(self, stim='<', slope='-'):
-        """Specify how the trigger channel is synthesized form analog channels.
+    Returns
+    -------
+    dig_points : list
+        List of digitizer points for info['dig'].
+    dev_head_t : dict
+        A dictionary describing the device-head transformation.
+    """
+    if isinstance(hsp, string_types):
+        hsp = _read_dig_points(hsp)
+    n_pts = len(hsp)
+    if n_pts > KIT.DIG_POINTS:
+        hsp = _decimate_points(hsp, res=5)
+        n_new = len(hsp)
+        msg = ("The selected head shape contained {n_in} points, which is "
+               "more than recommended ({n_rec}), and was automatically "
+               "downsampled to {n_new} points. The preferred way to "
+               "downsample is using FastScan."
+               ).format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
+        logger.warning(msg)
+
+    if isinstance(elp, string_types):
+        elp_points = _read_dig_points(elp)
+        if len(elp_points) != 8:
+            err = ("File %r should contain 8 points; got shape "
+                   "%s." % (elp, elp_points.shape))
+            raise ValueError(err)
+        elp = elp_points
 
-        Has to be done before loading data. For a RawKIT instance that has been
-        created with preload=True, this method will raise a
-        NotImplementedError.
+    elif len(elp) != 8:
+        err = ("ELP should contain 8 points; got shape "
+               "%s." % (elp.shape,))
+        raise ValueError(err)
+    if isinstance(mrk, string_types):
+        mrk = read_mrk(mrk)
 
-        Parameters
-        ----------
-        stim : list of int | '<' | '>'
-            Can be submitted as list of trigger channels.
-            If a list is not specified, the default triggers extracted from
-            misc channels will be used with specified directionality.
-            '<' means that largest values assigned to the first channel
-            in sequence.
-            '>' means the largest trigger assigned to the last channel
-            in sequence.
-        slope : '+' | '-'
-            '+' means a positive slope (low-to-high) on the event channel(s)
-            is used to trigger an event.
-            '-' means a negative slope (high-to-low) on the event channel(s)
-            is used to trigger an event.
-        """
-        if self.preload:
-            err = "Can't change stim channel after preloading data"
-            raise NotImplementedError(err)
+    hsp = apply_trans(als_ras_trans_mm, hsp)
+    elp = apply_trans(als_ras_trans_mm, elp)
+    mrk = apply_trans(als_ras_trans, mrk)
 
-        self._sqd_params['slope'] = slope
+    nasion, lpa, rpa = elp[:3]
+    nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
+    elp = apply_trans(nmtrans, elp)
+    hsp = apply_trans(nmtrans, hsp)
 
-        if isinstance(stim, str):
-            picks = pick_types(self.info, meg=False, ref_meg=False,
-                               misc=True, exclude=[])[:8]
-            if stim == '<':
-                stim = picks[::-1]
-            elif stim == '>':
-                stim = picks
-            else:
-                raise ValueError("stim needs to be list of int, '>' or "
-                                 "'<', not %r" % str(stim))
-        elif np.max(stim) >= self._sqd_params['nchan']:
-            msg = ("Tried to set stim channel %i, but squid file only has %i"
-                   " channels" % (np.max(stim), self._sqd_params['nchan']))
-            raise ValueError(msg)
+    # device head transform
+    trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
+
+    nasion, lpa, rpa = elp[:3]
+    elp = elp[3:]
 
-        self._sqd_params['stim'] = stim
+    dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp)
+    dev_head_t = Transform('meg', 'head', trans)
 
+    return dig_points, dev_head_t
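
For reference, a minimal sketch of the head-coordinate step inside `_set_dig_kit`: the first three elp points (nasion, LPA, RPA) define the RAS-to-Neuromag transform that is then applied to all digitizer data (the coordinates below are hypothetical)::

    import numpy as np
    from mne.transforms import get_ras_to_neuromag_trans, apply_trans

    elp = np.random.RandomState(0).uniform(-.1, .1, (8, 3))  # fake RAS pts
    nasion, lpa, rpa = elp[:3]
    nm_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
    elp_head = apply_trans(nm_trans, elp)   # fiducials + HPI in head coords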
 
-def get_sqd_params(rawfile):
+
+def get_kit_info(rawfile):
     """Extracts all the information from the sqd file.
 
     Parameters
     ----------
     rawfile : str
-        Raw sqd file to be read.
+        KIT file to be read.
 
     Returns
     -------
+    info : instance of Info
+        An Info for the instance.
     sqd : dict
         A dict containing all the sqd parameter settings.
     """
@@ -589,24 +583,24 @@ def get_sqd_params(rawfile):
         fid.seek(amp_offset)
         amp_data = unpack('i', fid.read(KIT_SYS.INT))[0]
 
-        gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data)
-                              >> KIT_SYS.GAIN1_BIT]
-        gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data)
-                              >> KIT_SYS.GAIN2_BIT]
+        gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data) >>
+                              KIT_SYS.GAIN1_BIT]
+        gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data) >>
+                              KIT_SYS.GAIN2_BIT]
         if KIT_SYS.GAIN3_BIT:
-            gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data)
-                                  >> KIT_SYS.GAIN3_BIT]
+            gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data) >>
+                                  KIT_SYS.GAIN3_BIT]
             sqd['amp_gain'] = gain1 * gain2 * gain3
         else:
             sqd['amp_gain'] = gain1 * gain2
 
         # filter settings
-        sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data)
-                                      >> KIT_SYS.LPF_BIT]
-        sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data)
-                                       >> KIT_SYS.HPF_BIT]
-        sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data)
-                                    >> KIT_SYS.BEF_BIT]
+        sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data) >>
+                                      KIT_SYS.LPF_BIT]
+        sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data) >>
+                                       KIT_SYS.HPF_BIT]
+        sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data) >>
+                                    KIT_SYS.BEF_BIT]
 
         # only sensor channels requires gain. the additional misc channels
         # (trigger channels, audio and voice channels) are passed
@@ -625,19 +619,110 @@ def get_sqd_params(rawfile):
         acqcond_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
         fid.seek(acqcond_offset)
         acq_type = unpack('i', fid.read(KIT_SYS.INT))[0]
+        sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
         if acq_type == 1:
-            sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
-            _ = fid.read(KIT_SYS.INT)  # initialized estimate of samples
-            sqd['nsamples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            fid.read(KIT_SYS.INT)  # initialized estimate of samples
+            sqd['n_samples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+        elif acq_type == 2 or acq_type == 3:
+            sqd['frame_length'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['pretrigger_length'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['average_count'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['n_epochs'] = unpack('i', fid.read(KIT_SYS.INT))[0]
+            sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
         else:
-            err = ("You are probably trying to load a file that is not a "
-                   "continuous recording sqd file.")
-            raise ValueError(err)
+            err = ("Your file is neither continuous nor epoched data. "
+                   "What type of file is it?!")
+            raise TypeError(err)
         sqd['n_sens'] = KIT_SYS.N_SENS
         sqd['nmegchan'] = KIT_SYS.NMEGCHAN
         sqd['nmiscchan'] = KIT_SYS.NMISCCHAN
         sqd['DYNAMIC_RANGE'] = KIT_SYS.DYNAMIC_RANGE
-    return sqd
+        sqd['acq_type'] = acq_type
+
+        # Create raw.info dict for raw fif object with SQD data
+        info = _empty_info()
+        info.update(meas_date=int(time.time()), lowpass=sqd['lowpass'],
+                    highpass=sqd['highpass'], sfreq=float(sqd['sfreq']),
+                    filename=rawfile, nchan=sqd['nchan'])
+
+        # Creates a list of dicts of meg channels for raw.info
+        logger.info('Setting channel info structure...')
+        ch_names = {}
+        ch_names['MEG'] = ['MEG %03d' % ch for ch
+                           in range(1, sqd['n_sens'] + 1)]
+        ch_names['MISC'] = ['MISC %03d' % ch for ch
+                            in range(1, sqd['nmiscchan'] + 1)]
+        locs = sqd['sensor_locs']
+        chan_locs = apply_trans(als_ras_trans, locs[:, :3])
+        chan_angles = locs[:, 3:]
+        info['chs'] = []
+        for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
+                                          chan_angles), 1):
+            ch_name, ch_loc, ch_angles = ch_info
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idx
+            chan_info['scanno'] = idx
+            chan_info['range'] = KIT.RANGE
+            chan_info['unit_mul'] = KIT.UNIT_MUL
+            chan_info['ch_name'] = ch_name
+            chan_info['unit'] = FIFF.FIFF_UNIT_T
+            chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+            if idx <= sqd['nmegchan']:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
+                chan_info['kind'] = FIFF.FIFFV_MEG_CH
+            else:
+                chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
+                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
+
+            # create three orthogonal vectors
+            # ch_angles[0]: theta, ch_angles[1]: phi
+            ch_angles = np.radians(ch_angles)
+            x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
+            y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
+            z = np.cos(ch_angles[0])
+            vec_z = np.array([x, y, z])
+            length = linalg.norm(vec_z)
+            vec_z /= length
+            vec_x = np.zeros(vec_z.size, dtype=np.float)
+            if vec_z[1] < vec_z[2]:
+                if vec_z[0] < vec_z[1]:
+                    vec_x[0] = 1.0
+                else:
+                    vec_x[1] = 1.0
+            elif vec_z[0] < vec_z[2]:
+                vec_x[0] = 1.0
+            else:
+                vec_x[2] = 1.0
+            vec_x -= np.sum(vec_x * vec_z) * vec_z
+            length = linalg.norm(vec_x)
+            vec_x /= length
+            vec_y = np.cross(vec_z, vec_x)
+            # transform to Neuromag like coordinate space
+            vecs = np.vstack((vec_x, vec_y, vec_z))
+            vecs = apply_trans(als_ras_trans, vecs)
+            chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
+            info['chs'].append(chan_info)
+
+        # label trigger and misc channels
+        for idy, ch_name in enumerate(ch_names['MISC'],
+                                      sqd['n_sens'] + 1):
+            chan_info = {}
+            chan_info['cal'] = KIT.CALIB_FACTOR
+            chan_info['logno'] = idy
+            chan_info['scanno'] = idy
+            chan_info['range'] = 1.0
+            chan_info['unit'] = FIFF.FIFF_UNIT_V
+            chan_info['unit_mul'] = 0
+            chan_info['ch_name'] = ch_name
+            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
+            chan_info['loc'] = np.zeros(12)
+            chan_info['kind'] = FIFF.FIFFV_MISC_CH
+            info['chs'].append(chan_info)
+
+        info['ch_names'] = ch_names['MEG'] + ch_names['MISC']
+
+    return info, sqd
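
The channel-geometry loop above turns each sensor's (theta, phi) angles into an orthonormal coil frame. A simplified standalone variant of that construction (the angles are made up, and the in-tree code selects the helper axis with explicit comparisons rather than `argmin`)::

    import numpy as np

    theta, phi = np.radians([45., 30.])        # fake sensor angles
    vec_z = np.array([np.sin(theta) * np.cos(phi),
                      np.sin(theta) * np.sin(phi),
                      np.cos(theta)])          # coil normal, unit length
    vec_x = np.zeros(3)
    vec_x[np.argmin(np.abs(vec_z))] = 1.       # helper axis, not parallel
    vec_x -= vec_x.dot(vec_z) * vec_z          # Gram-Schmidt step
    vec_x /= np.linalg.norm(vec_x)
    vec_y = np.cross(vec_z, vec_x)             # right-handed frame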
 
 
 def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
@@ -678,7 +763,65 @@ def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
         If False, data are not read until save.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : Instance of RawKIT
+        A Raw object containing KIT data.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attribute and methods.
     """
     return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
                   stim=stim, slope=slope, stimthresh=stimthresh,
                   preload=preload, verbose=verbose)
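
A hypothetical end-to-end call of the raw reader; all file names are placeholders::

    from mne.io import read_raw_kit

    raw = read_raw_kit('data.sqd', mrk='marker.sqd', elp='points.txt',
                       hsp='headshape.txt', stim='>', slope='-',
                       stimthresh=1, preload=True)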
+
+
+def read_epochs_kit(input_fname, events, event_id=None,
+                    mrk=None, elp=None, hsp=None, verbose=None):
+    """Reader function for KIT epochs files
+
+    Parameters
+    ----------
+    input_fname : str
+        Path to the sqd file.
+    events : array, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be marked as 'IGNORED' in the drop log.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict,
+        the keys can later be used to access associated events. Example:
+        dict(auditory=1, visual=3). If int, a dict will be created with
+        the id as string. If a list, all events with the IDs specified
+        in the list are used. If None, all events will be used and
+        a dict is created with string integer names corresponding
+        to the event id integers.
+    mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
+        Marker points representing the location of the marker coils with
+        respect to the MEG Sensors, or path to a marker file.
+        If list, all of the markers will be averaged together.
+    elp : None | str | array_like, shape = (8, 3)
+        Digitizer points representing the location of the fiducials and the
+        marker coils with respect to the digitized head shape, or path to a
+        file containing these points.
+    hsp : None | str | array, shape = (n_points, 3)
+        Digitizer head shape points, or path to head shape file. If more than
+        10,000 points are in the head shape, they are automatically decimated.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    epochs : instance of Epochs
+        The epochs.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    epochs = EpochsKIT(input_fname=input_fname, events=events,
+                       event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
+                       verbose=verbose)
+    return epochs
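
And a matching sketch for the new epochs reader; the file names are placeholders, and `events` is an array from `read_events` here, though a path also works::

    from mne import read_events
    from mne.io import read_epochs_kit

    events = read_events('epochs-eve.txt')
    epochs = read_epochs_kit('epochs.sqd', events, event_id=dict(tone=128))
    data = epochs.get_data()   # (n_epochs, n_channels, n_times)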
diff --git a/mne/io/kit/tests/data/test-epoch.raw b/mne/io/kit/tests/data/test-epoch.raw
new file mode 100644
index 0000000..42a6658
Binary files /dev/null and b/mne/io/kit/tests/data/test-epoch.raw differ
diff --git a/mne/io/kit/tests/data/test-eve.txt b/mne/io/kit/tests/data/test-eve.txt
new file mode 100644
index 0000000..c505110
--- /dev/null
+++ b/mne/io/kit/tests/data/test-eve.txt
@@ -0,0 +1,2 @@
+   279      0 128
+  1562      0 128
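
Each row of the new event file is (sample, previous trigger value, event id), the plain-text format consumed by `mne.read_events`; reading it back gives::

    from mne import read_events

    events = read_events('mne/io/kit/tests/data/test-eve.txt')
    # -> array([[ 279, 0, 128],
    #           [1562, 0, 128]])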
diff --git a/mne/io/kit/tests/test_coreg.py b/mne/io/kit/tests/test_coreg.py
index 46f189f..f117d99 100644
--- a/mne/io/kit/tests/test_coreg.py
+++ b/mne/io/kit/tests/test_coreg.py
@@ -5,70 +5,26 @@
 import inspect
 import os
 
-import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
+from numpy.testing import assert_array_equal
 
-from mne.io.kit import read_hsp, write_hsp, read_mrk, write_mrk
-from mne.coreg import get_ras_to_neuromag_trans
-from mne.transforms import apply_trans, rotation, translation
+from mne.io.kit import read_mrk
+from mne.io.meas_info import _write_dig_points
 from mne.utils import _TempDir
 
 
 FILE = inspect.getfile(inspect.currentframe())
 parent_dir = os.path.dirname(os.path.abspath(FILE))
 data_dir = os.path.join(parent_dir, 'data')
-hsp_fname = os.path.join(data_dir, 'test_hsp.txt')
 mrk_fname = os.path.join(data_dir, 'test_mrk.sqd')
-tempdir = _TempDir()
-
-
-def test_io_hsp():
-    """Test IO for hsp files"""
-    pts = read_hsp(hsp_fname)
-
-    dest = os.path.join(tempdir, 'test.txt')
-    write_hsp(dest, pts)
-    pts1 = read_hsp(dest)
-    assert_array_equal(pts, pts1, "Hsp points diverged after writing and "
-                       "reading.")
 
 
 def test_io_mrk():
     """Test IO for mrk files"""
+    tempdir = _TempDir()
     pts = read_mrk(mrk_fname)
 
-    # pickle
-    path = os.path.join(tempdir, "mrk.pickled")
-    write_mrk(path, pts)
-    pts_2 = read_mrk(path)
-    assert_array_equal(pts, pts_2, "read/write with pickle")
-
     # txt
     path = os.path.join(tempdir, 'mrk.txt')
-    write_mrk(path, pts)
+    _write_dig_points(path, pts)
     pts_2 = read_mrk(path)
     assert_array_equal(pts, pts_2, "read/write mrk to text")
-
-
-def test_hsp_trans():
-    """Test the coordinate transformation for hsp files"""
-    # create model points in neuromag-like space
-    anterior = [0, 1, 0]
-    left = [-1, 0, 0]
-    right = [.8, 0, 0]
-    up = [0, 0, 1]
-    rand_pts = np.random.uniform(-1, 1, (3, 3))
-    pts = np.vstack((anterior, left, right, up, rand_pts))
-
-    # change coord system
-    rx, ry, rz, tx, ty, tz = np.random.uniform(-2 * np.pi, 2 * np.pi, 6)
-    trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
-    pts_changed = apply_trans(trans, pts)
-
-    # transform back into original space
-    nas, lpa, rpa = pts_changed[:3]
-    hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
-    pts_restored = apply_trans(hsp_trans, pts_changed)
-
-    assert_array_almost_equal(pts_restored, pts, 6, "Neuromag transformation "
-                              "failed")
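
The rewritten test reduces to the following round trip, sketched with a
hypothetical output path (_write_dig_points is private and, per its docstring
further below, only accepts a '.txt' extension):

    from numpy.testing import assert_array_equal
    from mne.io.kit import read_mrk
    from mne.io.meas_info import _write_dig_points

    pts = read_mrk('mne/io/kit/tests/data/test_mrk.sqd')
    _write_dig_points('mrk.txt', pts)  # writes tab-separated x y z rows
    assert_array_equal(read_mrk('mrk.txt'), pts)
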
diff --git a/mne/io/kit/tests/test_kit.py b/mne/io/kit/tests/test_kit.py
index 1d11cf1..72b3028 100644
--- a/mne/io/kit/tests/test_kit.py
+++ b/mne/io/kit/tests/test_kit.py
@@ -1,7 +1,7 @@
 """Data and Channel Location Equivalence Tests"""
 from __future__ import print_function
 
-# Author: Teon Brooks <teon at nyu.edu>
+# Author: Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -9,33 +9,52 @@ import os.path as op
 import inspect
 import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_equal, assert_raises, assert_true
 import scipy.io
-from mne.utils import _TempDir
-from mne import pick_types
+
+from mne import pick_types, concatenate_raws, Epochs, read_events
+from mne.utils import _TempDir, run_tests_if_main
 from mne.io import Raw
-from mne.io import read_raw_kit
+from mne.io import read_raw_kit, read_epochs_kit
 from mne.io.kit.coreg import read_sns
+from mne.io.tests.test_raw import _test_concat
 
 FILE = inspect.getfile(inspect.currentframe())
 parent_dir = op.dirname(op.abspath(FILE))
 data_dir = op.join(parent_dir, 'data')
 sqd_path = op.join(data_dir, 'test.sqd')
+epochs_path = op.join(data_dir, 'test-epoch.raw')
+events_path = op.join(data_dir, 'test-eve.txt')
 mrk_path = op.join(data_dir, 'test_mrk.sqd')
 mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
 mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
 elp_path = op.join(data_dir, 'test_elp.txt')
 hsp_path = op.join(data_dir, 'test_hsp.txt')
 
-tempdir = _TempDir()
+
+def test_concat():
+    """Test EDF concatenation
+    """
+    _test_concat(read_raw_kit, sqd_path)
 
 
 def test_data():
     """Test reading raw kit files
     """
+    assert_raises(TypeError, read_raw_kit, epochs_path)
+    assert_raises(TypeError, read_epochs_kit, sqd_path)
+    assert_raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_path)
+    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
+                  list(range(200, 190, -1)))
+    assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
+                  list(range(167, 159, -1)), '*', 1, True)
+    # check functionality
+    _ = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_path,
+                     hsp_path)
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
                           stim=list(range(167, 159, -1)), slope='+',
                           stimthresh=1, preload=True)
-    print(repr(raw_py))
+    assert_true('RawKIT' in repr(raw_py))
 
     # Binary file only stores the sensor channels
     py_picks = pick_types(raw_py.info, exclude='bads')
@@ -57,10 +76,25 @@ def test_data():
     data_py, _ = raw_py[py_picks]
     assert_array_almost_equal(data_py, data_bin)
 
+    # Make sure concatenation works
+    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
+    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
+
+
+def test_epochs():
+    raw = read_raw_kit(sqd_path, stim=None)
+    events = read_events(events_path)
+    raw_epochs = Epochs(raw, events, None, tmin=0, tmax=.099, baseline=None)
+    data1 = raw_epochs.get_data()
+    epochs = read_epochs_kit(epochs_path, events_path)
+    data11 = epochs.get_data()
+    assert_array_equal(data1, data11)
+
 
 def test_read_segment():
     """Test writing raw kit files when preload is False
     """
+    tempdir = _TempDir()
     raw1 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                         preload=False)
     raw1_file = op.join(tempdir, 'test1-raw.fif')
@@ -69,12 +103,25 @@ def test_read_segment():
                         preload=True)
     raw2_file = op.join(tempdir, 'test2-raw.fif')
     raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
+    data1, times1 = raw1[0, 0:1]
+
     raw1 = Raw(raw1_file, preload=True)
     raw2 = Raw(raw2_file, preload=True)
     assert_array_equal(raw1._data, raw2._data)
+    data2, times2 = raw2[0, 0:1]
+    assert_array_almost_equal(data1, data2)
+    assert_array_almost_equal(times1, times2)
     raw3 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
                         preload=True)
     assert_array_almost_equal(raw1._data, raw3._data)
+    raw4 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=False)
+    raw4.load_data()
+    buffer_fname = op.join(tempdir, 'buffer')
+    assert_array_almost_equal(raw1._data, raw4._data)
+    raw5 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+                        preload=buffer_fname)
+    assert_array_almost_equal(raw1._data, raw5._data)
 
 
 def test_ch_loc():
@@ -83,7 +130,7 @@ def test_ch_loc():
     raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<')
     raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'))
 
-    ch_py = raw_py._sqd_params['sensor_locs'][:, :5]
+    ch_py = raw_py._raw_extras[0]['sensor_locs'][:, :5]
     # ch locs stored as m, not mm
     ch_py[:, :3] *= 1e3
     ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
@@ -95,13 +142,10 @@ def test_ch_loc():
         if bin_ch['ch_name'].startswith('MEG'):
             # the stored ch locs have more precision than the sns.txt
             assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)
-            assert_array_almost_equal(py_ch['coil_trans'],
-                                      bin_ch['coil_trans'],
-                                      decimal=2)
 
     # test when more than one marker file provided
     mrks = [mrk_path, mrk2_path, mrk3_path]
-    _ = read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
+    read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
 
 
 def test_stim_ch():
@@ -114,3 +158,6 @@ def test_stim_ch():
     stim1, _ = raw[stim_pick]
     stim2 = np.array(raw.read_stim_ch(), ndmin=2)
     assert_array_equal(stim1, stim2)
+
+
+run_tests_if_main()
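
The concatenation behaviour exercised by test_concat and test_data amounts to
this sketch (hypothetical file name):

    import mne

    raw = mne.io.read_raw_kit('data.sqd', preload=True)
    # Concatenating a recording with itself doubles the number of samples.
    raw_concat = mne.concatenate_raws([raw.copy(), raw])
    assert raw_concat.n_times == 2 * raw.n_times
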
diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py
index 802f633..f8f3928 100644
--- a/mne/io/meas_info.py
+++ b/mne/io/meas_info.py
@@ -1,36 +1,44 @@
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
 from warnings import warn
 from copy import deepcopy
+from datetime import datetime as dt
 import os.path as op
+
 import numpy as np
 from scipy import linalg
-from ..externals.six import BytesIO, string_types
-from datetime import datetime as dt
 
+from .pick import channel_type
 from .constants import FIFF
 from .open import fiff_open
-from .tree import dir_tree_find, copy_tree
+from .tree import dir_tree_find
 from .tag import read_tag, find_tag
 from .proj import _read_proj, _write_proj, _uniquify_projs
 from .ctf import read_ctf_comp, write_ctf_comp
 from .write import (start_file, end_file, start_block, end_block,
                     write_string, write_dig_point, write_float, write_int,
                     write_coord_trans, write_ch_info, write_name_list,
-                    write_julian)
+                    write_julian, write_float_matrix)
+from .proc_history import _read_proc_history, _write_proc_history
 from ..utils import logger, verbose
+from ..fixes import Counter
+from .. import __version__
+from ..externals.six import b, BytesIO, string_types, text_type
+
 
 _kind_dict = dict(
-    eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),
     mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),
     grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),
     misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),
     stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
     eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
     ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
+    seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),
 )
 
 
@@ -40,9 +48,116 @@ def _summarize_str(st):
 
 
 class Info(dict):
-    """ Info class to nicely represent info dicts
+    """Information about the recording.
+
+    This data structure behaves like a dictionary. It contains all meta-data
+    that is available for a recording.
+
+    The attributes listed below are the possible dictionary entries:
+
+    Attributes
+    ----------
+    bads : list of str
+        List of bad (noisy/broken) channels, by name. These channels will by
+        default be ignored by many processing steps.
+    ch_names : list of str
+        The names of the channels.
+    chs : list of dict
+        A list of channel information structures.
+        See: :ref:`faq` for details.
+    comps : list of dict
+        CTF software gradient compensation data.
+        See: :ref:`faq` for details.
+    custom_ref_applied : bool
+        Whether a custom (=other than average) reference has been applied to
+        the EEG data. This flag is checked by some algorithms that require an
+        average reference to be set.
+    events : list of dict
+        Event list, usually extracted from the stim channels.
+        See: :ref:`faq` for details.
+    hpi_results : list of dict
+        Head position indicator (HPI) digitization points.
+        See: :ref:`faq` for details.
+    meas_date : list of int
+        The first element of this list is a POSIX timestamp (seconds since
+        1970-01-01 00:00:00) denoting the date and time at which the
+        measurement was taken.
+        TODO: what are the other fields?
+    nchan : int
+        Number of channels.
+    projs : list of dict
+        List of SSP operators that operate on the data.
+        See: :ref:`faq` for details.
+    sfreq : float
+        Sampling frequency in Hertz.
+        See: :ref:`faq` for details.
+    acq_pars : str | None
+        MEG system acquisition parameters.
+    acq_stim : str | None
+        TODO: What is this?
+    buffer_size_sec : float | None
+        Buffer size (in seconds) when reading the raw data in chunks.
+    ctf_head_t : dict | None
+        The transformation from 4D/CTF head coordinates to Neuromag head
+        coordinates. This is only present in 4D/CTF data.
+        See: :ref:`faq` for details.
+    description : str | None
+        String description of the recording.
+    dev_ctf_t : dict | None
+        The transformation from device coordinates to 4D/CTF head coordinates.
+        This is only present in 4D/CTF data.
+        See: :ref:`faq` for details.
+    dev_head_t : dict | None
+        The device to head transformation.
+        See: :ref:`faq` for details.
+    dig : list of dict | None
+        The Polhemus digitization data in head coordinates.
+        See: :ref:`faq` for details.
+    experimenter : str | None
+        Name of the person who ran the experiment.
+    file_id : dict | None
+        The fif ID datastructure of the measurement file.
+        See: :ref:`faq` for details.
+    filename : str | None
+        The name of the file that provided the raw data.
+    highpass : float | None
+        Highpass corner frequency in Hertz. Zero indicates a DC recording.
+    hpi_meas : list of dict | None
+        HPI measurements.
+        TODO: What is this exactly?
+    hpi_subsystem : dict | None
+        TODO: What is this?
+    line_freq : float | None
+        Frequency of the power line in Hertz.
+    lowpass : float | None
+        Lowpass corner frequency in Hertz.
+    meas_id : dict | None
+        The ID assigned to this measurement by the acquisition system or during
+        file conversion.
+        See: :ref:`faq` for details.
+    proj_id : int | None
+        ID number of the project the experiment belongs to.
+    proj_name : str | None
+        Name of the project the experiment belongs to.
+    subject_info : dict | None
+        Information about the subject.
+        See: :ref:`subject_info` for details.
+    proc_history : list of dict | None
+        The SSS info, the CTC correction and the calibrations from the SSS
+        processing logs inside of a raw file (may be absent entirely).
+        See: :ref:`faq` for details.
     """
 
+    def copy(self):
+        """Copy the instance
+
+        Returns
+        -------
+        info : instance of Info
+            The copied info.
+        """
+        return Info(super(Info, self).copy())
+
     def __repr__(self):
         """Summarize info instead of printing all"""
         strs = ['<Info | %s non-empty fields']
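
In use, the structure keeps its dict semantics; a short sketch, assuming raw is
any loaded Raw instance:

    info = raw.info                      # an instance of Info
    print(info['sfreq'], info['nchan'])  # plain dict access still works
    info2 = info.copy()                  # the new copy() returns an Info, too
    print(info)                          # repr() summarizes non-empty fields
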
@@ -67,12 +182,18 @@ class Info(dict):
                 entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
             else:
                 this_len = (len(v) if hasattr(v, '__len__') else
-                           ('%s' % v if v is not None else None))
+                            ('%s' % v if v is not None else None))
                 entr = (('%d items' % this_len) if isinstance(this_len, int)
                         else ('%s' % this_len if this_len else ''))
             if entr:
                 non_empty += 1
                 entr = ' | ' + entr
+            if k == 'chs':
+                ch_types = [channel_type(self, idx) for idx in range(len(v))]
+                ch_counts = Counter(ch_types)
+                entr += " (%s)" % ', '.join("%s: %d" % (ch_type.upper(), count)
+                                            for ch_type, count
+                                            in ch_counts.items())
             strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))
         strs_non_empty = sorted(s for s in strs if '|' in s)
         strs_empty = sorted(s for s in strs if '|' not in s)
@@ -85,10 +206,32 @@ class Info(dict):
         if self.get('subject_info') is not None:
             del self['subject_info']
 
+    def _check_consistency(self):
+        """Do some self-consistency checks and datatype tweaks"""
+        missing = [bad for bad in self['bads'] if bad not in self['ch_names']]
+        if len(missing) > 0:
+            raise RuntimeError('channel(s) %s marked as bad do not exist '
+                               'in info' % (missing,))
+        chs = [ch['ch_name'] for ch in self['chs']]
+        if len(self['ch_names']) != len(chs) or any(
+                ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \
+                self['nchan'] != len(chs):
+            raise RuntimeError('info channel name inconsistency detected, '
+                               'please notify mne-python developers')
+        # make sure we have the proper datatypes
+        for key in ('sfreq', 'highpass', 'lowpass'):
+            if self.get(key) is not None:
+                self[key] = float(self[key])
+
 
 def read_fiducials(fname):
     """Read fiducials from a fiff file
 
+    Parameters
+    ----------
+    fname : str
+        The filename to read.
+
     Returns
     -------
     pts : list of dicts
@@ -156,6 +299,144 @@ def write_fiducials(fname, pts, coord_frame=0):
     end_file(fid)
 
 
+def _read_dig_points(fname, comments='%'):
+    """Read digitizer data from file.
+
+    This function can read space-delimited text files of digitizer data.
+
+    Parameters
+    ----------
+    fname : str
+        The filepath of space delimited file with points.
+    comments : str
+        The character used to indicate the start of a comment;
+        Default: '%'.
+
+    Returns
+    -------
+    dig_points : np.ndarray, shape (n_points, 3)
+        Array of dig points.
+    """
+    dig_points = np.loadtxt(fname, comments=comments, ndmin=2)
+    if dig_points.shape[-1] != 3:
+        err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)
+        raise ValueError(err)
+
+    return dig_points
+
+
+def _write_dig_points(fname, dig_points):
+    """Write points to file
+
+    Parameters
+    ----------
+    fname : str
+        Path to the file to write. The kind of file to write is determined
+        based on the extension: '.txt' for a tab-separated text file.
+    dig_points : numpy.ndarray, shape (n_points, 3)
+        Points.
+    """
+    _, ext = op.splitext(fname)
+    dig_points = np.asarray(dig_points)
+    if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):
+        err = ("Points must be of shape (n_points, 3), "
+               "not %s" % (dig_points.shape,))
+        raise ValueError(err)
+
+    if ext == '.txt':
+        with open(fname, 'wb') as fid:
+            version = __version__
+            now = dt.now().strftime("%I:%M%p on %B %d, %Y")
+            fid.write(b("% Ascii 3D points file created by mne-python version "
+                        "{version} at {now}\n".format(version=version,
+                                                      now=now)))
+            fid.write(b("% {N} 3D points, "
+                        "x y z per line\n".format(N=len(dig_points))))
+            np.savetxt(fid, dig_points, delimiter='\t', newline='\n')
+    else:
+        msg = "Unrecognized extension: %r. Need '.txt'." % ext
+        raise ValueError(msg)
+
+
+def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,
+                     dig_points=None):
+    """Constructs digitizer info for the info.
+
+    Parameters
+    ----------
+    nasion : array-like | numpy.ndarray, shape (3,) | None
+        Point designated as the nasion point.
+    lpa : array-like |  numpy.ndarray, shape (3,) | None
+        Point designated as the left auricular point.
+    rpa : array-like |  numpy.ndarray, shape (3,) | None
+        Point designated as the right auricular point.
+    hpi : array-like | numpy.ndarray, shape (n_points, 3) | None
+        Points designated as head position indicator points.
+    dig_points : array-like | numpy.ndarray, shape (n_points, 3) | None
+        Points designated as the head shape points.
+
+    Returns
+    -------
+    dig : list
+        List of digitizer points to be added to info['dig'].
+    """
+    dig = []
+    if nasion is not None:
+        nasion = np.asarray(nasion)
+        if nasion.shape == (3,):
+            dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('Nasion should have the shape (3,) instead of %s'
+                   % (nasion.shape,))
+            raise ValueError(msg)
+    if lpa is not None:
+        lpa = np.asarray(lpa)
+        if lpa.shape == (3,):
+            dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('LPA should have the shape (3,) instead of %s'
+                   % (lpa.shape,))
+            raise ValueError(msg)
+    if rpa is not None:
+        rpa = np.asarray(rpa)
+        if rpa.shape == (3,):
+            dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
+                        'kind': FIFF.FIFFV_POINT_CARDINAL,
+                        'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('RPA should have the shape (3,) instead of %s'
+                   % (rpa.shape,))
+            raise ValueError(msg)
+    if hpi is not None:
+        hpi = np.asarray(hpi)
+        if hpi.shape[1] == 3:
+            for idx, point in enumerate(hpi):
+                dig.append({'r': point, 'ident': idx,
+                            'kind': FIFF.FIFFV_POINT_HPI,
+                            'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('HPI should have the shape (n_points, 3) instead of '
+                   '%s' % (hpi.shape,))
+            raise ValueError(msg)
+    if dig_points is not None:
+        dig_points = np.asarray(dig_points)
+        if dig_points.shape[1] == 3:
+            for idx, point in enumerate(dig_points):
+                dig.append({'r': point, 'ident': idx,
+                            'kind': FIFF.FIFFV_POINT_EXTRA,
+                            'coord_frame': FIFF.FIFFV_COORD_HEAD})
+        else:
+            msg = ('Points should have the shape (n_points, 3) instead of '
+                   '%s' % (dig_points.shape,))
+            raise ValueError(msg)
+
+    return dig
+
+
 @verbose
 def read_info(fname, verbose=None):
     """Read measurement info from a file
@@ -225,6 +506,7 @@ def read_meas_info(fid, tree, verbose=None):
     meas : dict
         Node in tree that contains the info.
     """
+
     #   Find the desired blocks
     meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
     if len(meas) == 0:
@@ -243,6 +525,7 @@ def read_meas_info(fid, tree, verbose=None):
     #   Read measurement info
     dev_head_t = None
     ctf_head_t = None
+    dev_ctf_t = None
     meas_date = None
     highpass = None
     lowpass = None
@@ -254,6 +537,7 @@ def read_meas_info(fid, tree, verbose=None):
     proj_id = None
     proj_name = None
     line_freq = None
+    custom_ref_applied = False
     p = 0
     for k in range(meas_info['nent']):
         kind = meas_info['directory'][k].kind
@@ -280,12 +564,16 @@ def read_meas_info(fid, tree, verbose=None):
         elif kind == FIFF.FIFF_COORD_TRANS:
             tag = read_tag(fid, pos)
             cand = tag.data
+
             if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
-                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
                 dev_head_t = cand
             elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
-                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
+                    cand['to'] == FIFF.FIFFV_COORD_HEAD:
                 ctf_head_t = cand
+            elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \
+                    cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
+                dev_ctf_t = cand
         elif kind == FIFF.FIFF_EXPERIMENTER:
             tag = read_tag(fid, pos)
             experimenter = tag.data
@@ -301,6 +589,9 @@ def read_meas_info(fid, tree, verbose=None):
         elif kind == FIFF.FIFF_LINE_FREQ:
             tag = read_tag(fid, pos)
             line_freq = float(tag.data)
+        elif kind == FIFF.FIFF_CUSTOM_REF:
+            tag = read_tag(fid, pos)
+            custom_ref_applied = bool(tag.data)
 
     # Check that we have everything we need
     if nchan is None:
@@ -326,13 +617,14 @@ def read_meas_info(fid, tree, verbose=None):
                     tag = read_tag(fid, pos)
                     cand = tag.data
                     if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and
-                        cand['to'] == FIFF.FIFFV_COORD_HEAD and
-                        dev_head_t is None):
+                            cand['to'] == FIFF.FIFFV_COORD_HEAD and
+                            dev_head_t is None):
                         dev_head_t = cand
                     elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and
                           cand['to'] == FIFF.FIFFV_COORD_HEAD and
                           ctf_head_t is None):
                         ctf_head_t = cand
+
     #   Locate the Polhemus data
     isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
     dig = None
@@ -384,7 +676,97 @@ def read_meas_info(fid, tree, verbose=None):
     else:
         info = Info(file_id=None)
 
+    #   Locate events list
+    events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)
+    evs = list()
+    for event in events:
+        ev = dict()
+        for k in range(event['nent']):
+            kind = event['directory'][k].kind
+            pos = event['directory'][k].pos
+            if kind == FIFF.FIFF_EVENT_CHANNELS:
+                ev['channels'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_EVENT_LIST:
+                ev['list'] = read_tag(fid, pos).data
+        evs.append(ev)
+    info['events'] = evs
+
+    #   Locate HPI result
+    hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
+    hrs = list()
+    for hpi_result in hpi_results:
+        hr = dict()
+        hr['dig_points'] = []
+        for k in range(hpi_result['nent']):
+            kind = hpi_result['directory'][k].kind
+            pos = hpi_result['directory'][k].pos
+            if kind == FIFF.FIFF_DIG_POINT:
+                hr['dig_points'].append(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:
+                hr['order'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_COILS_USED:
+                hr['used'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:
+                hr['moments'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:
+                hr['goodness'] = read_tag(fid, pos).data
+            elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:
+                hr['good_limit'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:
+                hr['dist_limit'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:
+                hr['accept'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_COORD_TRANS:
+                hr['coord_trans'] = read_tag(fid, pos).data
+        hrs.append(hr)
+    info['hpi_results'] = hrs
+
+    #   Locate HPI Measurement
+    hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)
+    hms = list()
+    for hpi_meas in hpi_meass:
+        hm = dict()
+        for k in range(hpi_meas['nent']):
+            kind = hpi_meas['directory'][k].kind
+            pos = hpi_meas['directory'][k].pos
+            if kind == FIFF.FIFF_CREATOR:
+                hm['creator'] = text_type(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_SFREQ:
+                hm['sfreq'] = float(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_NCHAN:
+                hm['nchan'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_NAVE:
+                hm['nave'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_HPI_NCOIL:
+                hm['ncoil'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_FIRST_SAMPLE:
+                hm['first_samp'] = int(read_tag(fid, pos).data)
+            elif kind == FIFF.FIFF_LAST_SAMPLE:
+                hm['last_samp'] = int(read_tag(fid, pos).data)
+        hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)
+        hcs = []
+        for hpi_coil in hpi_coils:
+            hc = dict()
+            for k in range(hpi_coil['nent']):
+                kind = hpi_coil['directory'][k].kind
+                pos = hpi_coil['directory'][k].pos
+                if kind == FIFF.FIFF_HPI_COIL_NO:
+                    hc['number'] = int(read_tag(fid, pos).data)
+                elif kind == FIFF.FIFF_EPOCH:
+                    hc['epoch'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_SLOPES:
+                    hc['slopes'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_CORR_COEFF:
+                    hc['corr_coeff'] = read_tag(fid, pos).data
+                elif kind == FIFF.FIFF_HPI_COIL_FREQ:
+                    hc['coil_freq'] = read_tag(fid, pos).data
+            hcs.append(hc)
+        hm['hpi_coils'] = hcs
+        hms.append(hm)
+    info['hpi_meas'] = hms
+
     subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
+    si = None
     if len(subject_info) == 1:
         subject_info = subject_info[0]
         si = dict()
@@ -396,13 +778,16 @@ def read_meas_info(fid, tree, verbose=None):
                 si['id'] = int(tag.data)
             elif kind == FIFF.FIFF_SUBJ_HIS_ID:
                 tag = read_tag(fid, pos)
-                si['his_id'] = str(tag.data)
+                si['his_id'] = text_type(tag.data)
             elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
                 tag = read_tag(fid, pos)
-                si['last_name'] = str(tag.data)
+                si['last_name'] = text_type(tag.data)
             elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
                 tag = read_tag(fid, pos)
-                si['first_name'] = str(tag.data)
+                si['first_name'] = text_type(tag.data)
+            elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:
+                tag = read_tag(fid, pos)
+                si['middle_name'] = text_type(tag.data)
             elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
                 tag = read_tag(fid, pos)
                 si['birthday'] = tag.data
@@ -412,12 +797,38 @@ def read_meas_info(fid, tree, verbose=None):
             elif kind == FIFF.FIFF_SUBJ_HAND:
                 tag = read_tag(fid, pos)
                 si['hand'] = int(tag.data)
-    else:
-        si = None
     info['subject_info'] = si
 
-    #   Load extra information blocks
-    read_extra_meas_info(fid, tree, info)
+    hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)
+    hs = None
+    if len(hpi_subsystem) == 1:
+        hpi_subsystem = hpi_subsystem[0]
+        hs = dict()
+        for k in range(hpi_subsystem['nent']):
+            kind = hpi_subsystem['directory'][k].kind
+            pos = hpi_subsystem['directory'][k].pos
+            if kind == FIFF.FIFF_HPI_NCOIL:
+                tag = read_tag(fid, pos)
+                hs['ncoil'] = int(tag.data)
+            elif kind == FIFF.FIFF_EVENT_CHANNEL:
+                tag = read_tag(fid, pos)
+                hs['event_channel'] = text_type(tag.data)
+            hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)
+            hc = []
+            for coil in hpi_coils:
+                this_coil = dict()
+                for j in range(coil['nent']):
+                    kind = coil['directory'][j].kind
+                    pos = coil['directory'][j].pos
+                    if kind == FIFF.FIFF_EVENT_BITS:
+                        tag = read_tag(fid, pos)
+                        this_coil['event_bits'] = np.array(tag.data)
+                hc.append(this_coil)
+            hs['hpi_coils'] = hc
+    info['hpi_subsystem'] = hs
+
+    #   Read processing history
+    _read_proc_history(fid, tree, info)
 
     #  Make the most appropriate selection for the measurement id
     if meas_info['parent_id'] is None:
@@ -460,14 +871,12 @@ def read_meas_info(fid, tree, verbose=None):
     #
     info['dev_head_t'] = dev_head_t
     info['ctf_head_t'] = ctf_head_t
-    if dev_head_t is not None and ctf_head_t is not None:
+    info['dev_ctf_t'] = dev_ctf_t
+    if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:
+        from ..transforms import Transform
         head_ctf_trans = linalg.inv(ctf_head_t['trans'])
         dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
-        info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE,
-                             'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD,
-                             'trans': dev_ctf_trans}
-    else:
-        info['dev_ctf_t'] = None
+        info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)
 
     #   All kinds of auxiliary stuff
     info['dig'] = dig
@@ -476,40 +885,12 @@ def read_meas_info(fid, tree, verbose=None):
     info['comps'] = comps
     info['acq_pars'] = acq_pars
     info['acq_stim'] = acq_stim
+    info['custom_ref_applied'] = custom_ref_applied
+    info._check_consistency()
 
     return info, meas
 
 
-def read_extra_meas_info(fid, tree, info):
-    """Read extra blocks from fid"""
-    # current method saves them into a BytesIO file instance for simplicity
-    # this and its partner, write_extra_meas_info, could be made more
-    # comprehensive (i.e.., actually parse and read the data instead of
-    # just storing it for later)
-    blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
-              FIFF.FIFFB_PROCESSING_HISTORY]
-    info['orig_blocks'] = dict(blocks=blocks)
-    fid_bytes = BytesIO()
-    start_file(fid_bytes, tree['id'])
-    start_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
-    for block in info['orig_blocks']['blocks']:
-        nodes = dir_tree_find(tree, block)
-        copy_tree(fid, tree['id'], nodes, fid_bytes)
-    end_block(fid_bytes, FIFF.FIFFB_MEAS_INFO)
-    info['orig_blocks']['bytes'] = fid_bytes.getvalue()
-
-
-def write_extra_meas_info(fid, info):
-    """Write otherwise left out blocks of data"""
-    # uses BytesIO fake file to read the appropriate blocks
-    if 'orig_blocks' in info and info['orig_blocks'] is not None:
-        # Blocks from the original
-        fid_bytes, tree, _ = fiff_open(BytesIO(info['orig_blocks']['bytes']))
-        for block in info['orig_blocks']['blocks']:
-            nodes = dir_tree_find(tree, block)
-            copy_tree(fid_bytes, tree['id'], nodes, fid)
-
-
 def write_meas_info(fid, info, data_type=None, reset_range=True):
     """Write measurement info into a file id (from a fif file)
 
@@ -530,12 +911,80 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
     -----
     Tags are written in a particular order for compatibility with maxfilter.
     """
+    info._check_consistency()
 
     # Measurement info
     start_block(fid, FIFF.FIFFB_MEAS_INFO)
 
-    #   Extra measurement info
-    write_extra_meas_info(fid, info)
+    for event in info['events']:
+        start_block(fid, FIFF.FIFFB_EVENTS)
+        if event.get('channels') is not None:
+            write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])
+        if event.get('list') is not None:
+            write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])
+        end_block(fid, FIFF.FIFFB_EVENTS)
+
+    #   HPI Result
+    for hpi_result in info['hpi_results']:
+        start_block(fid, FIFF.FIFFB_HPI_RESULT)
+        for d in hpi_result['dig_points']:
+            write_dig_point(fid, d)
+        if 'order' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,
+                      hpi_result['order'])
+        if 'used' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])
+        if 'moments' in hpi_result:
+            write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,
+                               hpi_result['moments'])
+        if 'goodness' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,
+                        hpi_result['goodness'])
+        if 'good_limit' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
+                        hpi_result['good_limit'])
+        if 'dist_limit' in hpi_result:
+            write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,
+                        hpi_result['dist_limit'])
+        if 'accept' in hpi_result:
+            write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])
+        if 'coord_trans' in hpi_result:
+            write_coord_trans(fid, hpi_result['coord_trans'])
+        end_block(fid, FIFF.FIFFB_HPI_RESULT)
+
+    #   HPI Measurement
+    for hpi_meas in info['hpi_meas']:
+        start_block(fid, FIFF.FIFFB_HPI_MEAS)
+        if hpi_meas.get('creator') is not None:
+            write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])
+        if hpi_meas.get('sfreq') is not None:
+            write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])
+        if hpi_meas.get('nchan') is not None:
+            write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])
+        if hpi_meas.get('nave') is not None:
+            write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])
+        if hpi_meas.get('ncoil') is not None:
+            write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])
+        if hpi_meas.get('first_samp') is not None:
+            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])
+        if hpi_meas.get('last_samp') is not None:
+            write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])
+        for hpi_coil in hpi_meas['hpi_coils']:
+            start_block(fid, FIFF.FIFFB_HPI_COIL)
+            if hpi_coil.get('number') is not None:
+                write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])
+            if hpi_coil.get('epoch') is not None:
+                write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])
+            if hpi_coil.get('slopes') is not None:
+                write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])
+            if hpi_coil.get('corr_coeff') is not None:
+                write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,
+                            hpi_coil['corr_coeff'])
+            if hpi_coil.get('coil_freq') is not None:
+                write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,
+                            hpi_coil['coil_freq'])
+            end_block(fid, FIFF.FIFFB_HPI_COIL)
+        end_block(fid, FIFF.FIFFB_HPI_MEAS)
 
     #   Polhemus data
     if info['dig'] is not None:
@@ -563,6 +1012,9 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
     if info['ctf_head_t'] is not None:
         write_coord_trans(fid, info['ctf_head_t'])
 
+    if info['dev_ctf_t'] is not None:
+        write_coord_trans(fid, info['dev_ctf_t'])
+
     #   Projectors
     _write_proj(fid, info['projs'])
 
@@ -594,6 +1046,8 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
         write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
     if data_type is not None:
         write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
+    if info.get('custom_ref_applied'):
+        write_int(fid, FIFF.FIFF_CUSTOM_REF, info['custom_ref_applied'])
 
     #  Channel information
     for k, c in enumerate(info['chs']):
@@ -617,6 +1071,8 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
             write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
         if si.get('first_name') is not None:
             write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
+        if si.get('middle_name') is not None:
+            write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])
         if si.get('birthday') is not None:
             write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
         if si.get('sex') is not None:
@@ -625,8 +1081,27 @@ def write_meas_info(fid, info, data_type=None, reset_range=True):
             write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
         end_block(fid, FIFF.FIFFB_SUBJECT)
 
+    if info.get('hpi_subsystem') is not None:
+        hs = info['hpi_subsystem']
+        start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
+        if hs.get('ncoil') is not None:
+            write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])
+        if hs.get('event_channel') is not None:
+            write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])
+        if hs.get('hpi_coils') is not None:
+            for coil in hs['hpi_coils']:
+                start_block(fid, FIFF.FIFFB_HPI_COIL)
+                if coil.get('event_bits') is not None:
+                    write_int(fid, FIFF.FIFF_EVENT_BITS,
+                              coil['event_bits'])
+                end_block(fid, FIFF.FIFFB_HPI_COIL)
+        end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
+
     end_block(fid, FIFF.FIFFB_MEAS_INFO)
 
+    #   Processing history
+    _write_proc_history(fid, info)
+
 
 def write_info(fname, info, data_type=None, reset_range=True):
     """Write measurement info in fif file.
@@ -657,8 +1132,8 @@ def _is_equal_dict(dicts):
     is_equal = []
     for d in tests:
         k0, v0 = d[0]
-        is_equal.append(all([np.all(k == k0) and
-                        np.all(v == v0)  for k, v in d]))
+        is_equal.append(all(np.all(k == k0) and
+                        np.all(v == v0) for k, v in d))
     return all(is_equal)
 
 
@@ -694,7 +1169,7 @@ def _merge_dict_values(dicts, key, verbose=None):
             return values[int(idx)]
         elif len(idx) > 1:
             lists = (d[key] for d in dicts if isinstance(d[key], list))
-            return  _flatten(lists)
+            return _flatten(lists)
     # dict
     elif _check_isinstance(values, dict, all):
         is_qual = _is_equal_dict(values)
@@ -710,7 +1185,7 @@ def _merge_dict_values(dicts, key, verbose=None):
             raise RuntimeError(msg)
     # ndarray
     elif _check_isinstance(values, np.ndarray, all):
-        is_qual = all([np.all(values[0] == x) for x in values[1:]])
+        is_qual = all(np.all(values[0] == x) for x in values[1:])
         if is_qual:
             return values[0]
         elif key == 'meas_date':
@@ -744,15 +1219,38 @@ def _merge_dict_values(dicts, key, verbose=None):
 
 @verbose
 def _merge_info(infos, verbose=None):
-    """Merge two measurement info dictionaries"""
+    """Merge multiple measurement info dictionaries.
+
+     - Fields that are present in only one info object will be used in the
+       merged info.
+     - Fields that are present in multiple info objects and are the same
+       will be used in the merged info.
+     - Fields that are present in multiple info objects and are different
+       will result in a None value in the merged info.
+     - Channels will be concatenated. If multiple info objects contain
+       channels with the same name, an exception is raised.
+
+    Parameters
+    ----------
+    infos : list of instance of Info
+        Info objects to merge into one info object.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
+    Returns
+    -------
+    info : instance of Info
+        The merged info object.
+    """
+    for info in infos:
+        info._check_consistency()
     info = Info()
     ch_names = _merge_dict_values(infos, 'ch_names')
     duplicates = set([ch for ch in ch_names if ch_names.count(ch) > 1])
     if len(duplicates) > 0:
-        err = ("The following channels are present in more than one input "
+        msg = ("The following channels are present in more than one input "
                "measurement info objects: %s" % list(duplicates))
-        raise ValueError(err)
+        raise ValueError(msg)
     info['nchan'] = len(ch_names)
     info['ch_names'] = ch_names
     info['chs'] = []
@@ -766,39 +1264,50 @@ def _merge_info(infos, verbose=None):
             info[trans_name] = None
         elif len(trans) == 1:
             info[trans_name] = trans[0]
-        elif all([np.all(trans[0]['trans'] == x['trans']) and
-                  trans[0]['from'] == x['from'] and
-                  trans[0]['to'] == x['to']
-                  for x in trans[1:]]):
+        elif all(np.all(trans[0]['trans'] == x['trans']) and
+                 trans[0]['from'] == x['from'] and
+                 trans[0]['to'] == x['to']
+                 for x in trans[1:]):
             info[trans_name] = trans[0]
         else:
-            err = ("Measurement infos provide mutually inconsistent %s" %
+            msg = ("Measurement infos provide mutually inconsistent %s" %
                    trans_name)
-            raise ValueError(err)
+            raise ValueError(msg)
     other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',
-                    'comps', 'description', 'dig', 'experimenter', 'file_id',
-                    'filename', 'highpass', 'line_freq', 'lowpass',
-                    'meas_date', 'meas_id', 'orig_blocks', 'proj_id',
-                    'proj_name', 'projs', 'sfreq', 'subject_info', 'sfreq']
+                    'comps', 'custom_ref_applied', 'description', 'dig',
+                    'experimenter', 'file_id', 'filename', 'highpass',
+                    'hpi_results', 'hpi_meas', 'hpi_subsystem', 'events',
+                    'line_freq', 'lowpass', 'meas_date', 'meas_id',
+                    'proj_id', 'proj_name', 'projs', 'sfreq',
+                    'subject_info']
 
     for k in other_fields:
         info[k] = _merge_dict_values(infos, k)
-
+    info._check_consistency()
     return info
 
 
-def create_info(ch_names, sfreq, ch_types=None):
+def create_info(ch_names, sfreq, ch_types=None, montage=None):
     """Create a basic Info instance suitable for use with create_raw
 
     Parameters
     ----------
-    ch_names : list of str
-        Channel names.
+    ch_names : list of str | int
+        Channel names. If an int, a list of channel names will be created
+        from range(ch_names).
     sfreq : float
         Sample rate of the data.
-    ch_types : list of str
+    ch_types : list of str | str
         Channel types. If None, data are assumed to be misc.
         Currently supported fields are "mag", "grad", "eeg", and "misc".
+        If str, then all channels are assumed to be of the same type.
+    montage : None | str | Montage | DigMontage | list
+        A montage containing channel positions. If str or Montage is
+        specified, the channel info will be updated with the channel
+        positions. Default is None. If DigMontage is specified, the
+        digitizer information will be updated. A list of unique montages
+        can be specified and applied to the info. See also the documentation of
+        :func:`mne.channels.read_montage` for more information.
 
     Notes
     -----
@@ -806,28 +1315,29 @@ def create_info(ch_names, sfreq, ch_types=None):
     within the rest of the package. Advanced functionality such as source
     localization can only be obtained through substantial, proper
     modifications of the info structure (not recommended).
+
+    Note that the MEG device-to-head transform ``info['dev_head_t']`` will
+    be initialized to the identity transform.
     """
+    if isinstance(ch_names, int):
+        ch_names = list(np.arange(ch_names).astype(str))
     if not isinstance(ch_names, (list, tuple)):
-        raise TypeError('ch_names must be a list or tuple')
+        raise TypeError('ch_names must be a list, tuple, or int')
     sfreq = float(sfreq)
     if sfreq <= 0:
         raise ValueError('sfreq must be positive')
     nchan = len(ch_names)
     if ch_types is None:
         ch_types = ['misc'] * nchan
+    if isinstance(ch_types, string_types):
+        ch_types = [ch_types] * nchan
     if len(ch_types) != nchan:
         raise ValueError('ch_types and ch_names must be the same length')
-    info = Info()
-    info['meas_date'] = [0, 0]
+    info = _empty_info()
+    info['meas_date'] = np.array([0, 0], np.int32)
     info['sfreq'] = sfreq
-    for key in ['bads', 'projs', 'comps']:
-        info[key] = list()
-    for key in ['meas_id', 'file_id', 'highpass', 'lowpass', 'acq_pars',
-                'acq_stim', 'filename', 'dig']:
-        info[key] = None
     info['ch_names'] = ch_names
     info['nchan'] = nchan
-    info['chs'] = list()
     loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)
     for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):
         if not isinstance(name, string_types):
@@ -838,12 +1348,61 @@ def create_info(ch_names, sfreq, ch_types=None):
             raise KeyError('kind must be one of %s, not %s'
                            % (list(_kind_dict.keys()), kind))
         kind = _kind_dict[kind]
-        chan_info = dict(loc=loc, eeg_loc=None, unit_mul=0, range=1., cal=1.,
-                         coil_trans=None, kind=kind[0], coil_type=kind[1],
+        chan_info = dict(loc=loc, unit_mul=0, range=1., cal=1.,
+                         kind=kind[0], coil_type=kind[1],
                          unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                          ch_name=name, scanno=ci + 1, logno=ci + 1)
         info['chs'].append(chan_info)
-    info['dev_head_t'] = None
-    info['dev_ctf_t'] = None
-    info['ctf_head_t'] = None
+    if montage is not None:
+        from ..channels.montage import (Montage, DigMontage, _set_montage,
+                                        read_montage)
+        if not isinstance(montage, list):
+            montage = [montage]
+        for montage_ in montage:
+            if isinstance(montage_, (Montage, DigMontage)):
+                _set_montage(info, montage_)
+            elif isinstance(montage_, string_types):
+                montage_ = read_montage(montage_)
+                _set_montage(info, montage_)
+            else:
+                raise TypeError('Montage must be an instance of Montage, '
+                                'DigMontage, a list of montages, or a file '
+                                'path, not %s.' % type(montage_))
+    return info
+
+
+RAW_INFO_FIELDS = (
+    'acq_pars', 'acq_stim', 'bads', 'buffer_size_sec', 'ch_names', 'chs',
+    'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',
+    'dev_head_t', 'dig', 'experimenter', 'events',
+    'file_id', 'filename', 'highpass', 'hpi_meas', 'hpi_results',
+    'hpi_subsystem', 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'nchan',
+    'proj_id', 'proj_name', 'projs', 'sfreq', 'subject_info',
+)
+
+
+def _empty_info():
+    """Create an empty info dictionary"""
+    from ..transforms import Transform
+    _none_keys = (
+        'acq_pars', 'acq_stim', 'buffer_size_sec', 'ctf_head_t', 'description',
+        'dev_ctf_t', 'dig', 'experimenter',
+        'file_id', 'filename', 'highpass', 'hpi_subsystem', 'line_freq',
+        'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',
+        'subject_info',
+    )
+    _list_keys = (
+        'bads', 'ch_names', 'chs', 'comps', 'events', 'hpi_meas',
+        'hpi_results', 'projs',
+    )
+    info = Info()
+    for k in _none_keys:
+        info[k] = None
+    for k in _list_keys:
+        info[k] = list()
+    info['custom_ref_applied'] = False
+    info['nchan'] = info['sfreq'] = 0
+    info['dev_head_t'] = Transform('meg', 'head', np.eye(4))
+    assert set(info.keys()) == set(RAW_INFO_FIELDS)
+    info._check_consistency()
     return info
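
A minimal sketch of the extended create_info signature (channel names are
illustrative, and 'standard_1020' is assumed to be among the montages bundled
with mne.channels.read_montage):

    import numpy as np
    import mne

    # ch_types may now be a single str; montage positions the EEG channels.
    info = mne.create_info(ch_names=['Fz', 'Cz', 'Pz'], sfreq=1000.,
                           ch_types='eeg', montage='standard_1020')
    raw = mne.io.RawArray(np.zeros((3, 1000)), info)  # 1 s of zeros
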
diff --git a/mne/io/open.py b/mne/io/open.py
index f3b8c71..bcc1ce0 100644
--- a/mne/io/open.py
+++ b/mne/io/open.py
@@ -9,7 +9,7 @@ import os.path as op
 from io import BytesIO
 
 from .tag import read_tag_info, read_tag, read_big, Tag
-from .tree import make_dir_tree
+from .tree import make_dir_tree, dir_tree_find
 from .constants import FIFF
 from ..utils import logger, verbose
 from ..externals import six
@@ -31,6 +31,47 @@ def _fiff_get_fid(fname):
     return fid
 
 
+def _get_next_fname(fid, fname, tree):
+    """Auxiliary function to get the next filename in split files."""
+    nodes_list = dir_tree_find(tree, FIFF.FIFFB_REF)
+    next_fname = None
+    for nodes in nodes_list:
+        next_fname = None
+        for ent in nodes['directory']:
+            if ent.kind == FIFF.FIFF_REF_ROLE:
+                tag = read_tag(fid, ent.pos)
+                role = int(tag.data)
+                if role != FIFF.FIFFV_ROLE_NEXT_FILE:
+                    next_fname = None
+                    break
+            if ent.kind == FIFF.FIFF_REF_FILE_NAME:
+                tag = read_tag(fid, ent.pos)
+                next_fname = op.join(op.dirname(fname), tag.data)
+            if ent.kind == FIFF.FIFF_REF_FILE_NUM:
+                # Some files don't have the name, just the number. So
+                # we construct the name from the current name.
+                if next_fname is not None:
+                    continue
+                next_num = read_tag(fid, ent.pos).data
+                path, base = op.split(fname)
+                idx = base.find('.')
+                idx2 = base.rfind('-')
+                if idx2 < 0 and next_num == 1:
+                    # this is the first file, which may not be numbered
+                    next_fname = op.join(
+                        path, '%s-%d.%s' % (base[:idx], next_num,
+                                            base[idx + 1:]))
+                    continue
+                num_str = base[idx2 + 1:idx]
+                if not num_str.isdigit():
+                    continue
+                next_fname = op.join(path, '%s-%d.%s' % (base[:idx2],
+                                     next_num, base[idx + 1:]))
+        if next_fname is not None:
+            break
+    return next_fname
+
+
 @verbose
 def fiff_open(fname, preload=False, verbose=None):
     """Open a FIF file.
@@ -130,12 +171,11 @@ def show_fiff(fname, indent='    ', read_limit=np.inf, max_str=30,
         Max number of characters of string representation to print for
         each tag's data.
     output : type
-        Either str or list. str is equivalent to ``"\n".join(list)``,
-        which is more convenient for using ``print show_fiff(...)``.
+        Either str or list. str is a convenience output for printing.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
-    if not output in [list, str]:
+    if output not in [list, str]:
         raise ValueError('output must be list or str')
     f, tree, directory = fiff_open(fname)
     with f as fid:
@@ -149,8 +189,8 @@ def show_fiff(fname, indent='    ', read_limit=np.inf, max_str=30,
 def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']):
     """Helper to find matching values"""
     vals = [k for k, v in six.iteritems(FIFF)
-            if v == value and any([fmt in k for fmt in fmts])
-            and not any(exc in k for exc in exclude)]
+            if v == value and any(fmt in k for fmt in fmts) and
+            not any(exc in k for exc in exclude)]
     return vals
 
 
@@ -159,8 +199,8 @@ def _show_tree(fid, tree, indent, level, read_limit, max_str):
     this_idt = indent * level
     next_idt = indent * (level + 1)
     # print block-level information
-    out = [this_idt + str(tree['block'][0]) + ' = '
-           + '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))]
+    out = [this_idt + str(tree['block'][0]) + ' = ' +
+           '/'.join(_find_type(tree['block'], fmts=['FIFFB_']))]
     if tree['directory'] is not None:
         kinds = [ent.kind for ent in tree['directory']] + [-1]
         sizes = [ent.size for ent in tree['directory']]
@@ -199,9 +239,9 @@ def _show_tree(fid, tree, indent, level, read_limit, max_str):
                     else:
                         postpend += ' ... (unknown type)'
                 postpend = '>' * 20 + 'BAD' if not good else postpend
-                out += [next_idt + prepend + str(k) + ' = '
-                        + '/'.join(this_type) + ' (' + str(size) + ')'
-                        + postpend]
+                out += [next_idt + prepend + str(k) + ' = ' +
+                        '/'.join(this_type) + ' (' + str(size) + ')' +
+                        postpend]
                 counter = 0
                 good = True
 
diff --git a/mne/io/pick.py b/mne/io/pick.py
index c6347a4..027445f 100644
--- a/mne/io/pick.py
+++ b/mne/io/pick.py
@@ -27,7 +27,8 @@ def channel_type(info, idx):
     Returns
     -------
     type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg'
-           'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst'
+           'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst' | 'misc' |
+           'seeg' | 'chpi'
         Type of channel
     """
     kind = info['chs'][idx]['kind']
@@ -58,6 +59,8 @@ def channel_type(info, idx):
         return 'ias'
     elif kind == FIFF.FIFFV_SYST_CH:
         return 'syst'
+    elif kind == FIFF.FIFFV_SEEG_CH:
+        return 'seeg'
     elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
                   FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
                   FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
@@ -79,6 +82,11 @@ def pick_channels(ch_names, include, exclude=[]):
         List of channels to include (if empty include all available).
     exclude : list of string
         List of channels to exclude (if empty do not exclude any channel).
+        Defaults to [].
+
+    See Also
+    --------
+    pick_channels_regexp, pick_types
 
     Returns
     -------
@@ -87,6 +95,8 @@ def pick_channels(ch_names, include, exclude=[]):
     """
     if len(np.unique(ch_names)) != len(ch_names):
         raise RuntimeError('ch_names is not a unique list, picking is unsafe')
+    _check_excludes_includes(include)
+    _check_excludes_includes(exclude)
     sel = []
     for k, name in enumerate(ch_names):
         if (len(include) == 0 or name in include) and name not in exclude:
@@ -115,6 +125,10 @@ def pick_channels_regexp(ch_names, regexp):
     sel : array of int
         Indices of good channels.
 
+    See Also
+    --------
+    pick_channels
+
     Examples
     --------
     >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
@@ -128,7 +142,7 @@ def pick_channels_regexp(ch_names, regexp):
 
 def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
                emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
-               exci=False, ias=False, syst=False,
+               exci=False, ias=False, syst=False, seeg=False,
                include=[], exclude='bads', selection=None):
     """Pick channels by type and names
 
@@ -143,14 +157,14 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
         gradiometer.
     eeg : bool
         If True include EEG channels.
+    stim : bool
+        If True include stimulus channels.
     eog : bool
         If True include EOG channels.
     ecg : bool
         If True include ECG channels.
     emg : bool
         If True include EMG channels.
-    stim : bool
-        If True include stimulus channels.
     ref_meg: bool | str
         If True include CTF / 4D reference channels. If 'auto', the reference
         channels are only included if compensations are present.
@@ -167,11 +181,13 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
         Internal Active Shielding data (maybe on Triux only).
     syst : bool
         System status channel information (on Triux systems only).
+    seeg : bool
+        Stereotactic EEG channels.
     include : list of string
         List of additional channels to include. If empty do not include any.
     exclude : list of string | str
-        List of channels to exclude. If empty do not exclude any (default).
-        If 'bads', exclude channels in info['bads'].
+        List of channels to exclude. If 'bads' (default), exclude channels
+        in info['bads'].
     selection : list of string
         Restrict sensor channels (MEG, EEG) to this list of channel names.
 
@@ -180,6 +196,13 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
     sel : array of int
         Indices of good channels.
     """
+    # NOTE: Changes to this function's signature should also be changed in
+    # PickChannelsMixin
+    from .meas_info import Info
+    if not isinstance(info, Info):
+        raise TypeError('info must be an instance of Info, not %s'
+                        % type(info))
+    info._check_consistency()
     nchan = info['nchan']
     pick = np.zeros(nchan, dtype=np.bool)
 
@@ -187,7 +210,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
         raise ValueError('exclude must be a list of strings or "bads"')
     elif exclude == 'bads':
         exclude = info.get('bads', [])
-    elif not isinstance(exclude, list):
+    elif not isinstance(exclude, (list, tuple)):
         raise ValueError('exclude must either be "bads" or a list of strings.'
                          ' If only one channel is to be excluded, use '
                          '[ch_name] instead of passing ch_name.')
@@ -211,8 +234,8 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
                     pick[k] = True
                 elif meg == 'planar2' and info['ch_names'][k].endswith('3'):
                     pick[k] = True
-            elif (meg == 'mag'
-                    and info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T):
+            elif (meg == 'mag' and
+                  info['chs'][k]['unit'] == FIFF.FIFF_UNIT_T):
                 pick[k] = True
         elif kind == FIFF.FIFFV_EEG_CH and eeg:
             pick[k] = True
@@ -232,6 +255,8 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
             pick[k] = True
         elif kind == FIFF.FIFFV_SYST_CH and syst:
             pick[k] = True
+        elif kind == FIFF.FIFFV_SEEG_CH and seeg:
+            pick[k] = True
         elif kind == FIFF.FIFFV_IAS_CH and ias:
             pick[k] = True
         elif kind == FIFF.FIFFV_EXCI_CH and exci:
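
With the new flag, stereotactic EEG channels can be selected directly; a
minimal sketch (raw is assumed to exist):

    import mne

    picks = mne.pick_types(raw.info, meg=False, seeg=True, exclude='bads')
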
@@ -247,9 +272,9 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
         # the selection only restricts these types of channels
         sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
                     FIFF.FIFFV_EEG_CH]
-        for k in np.where(pick == True)[0]:
-            if (info['chs'][k]['kind'] in sel_kind
-                and info['ch_names'][k] not in selection):
+        for k in np.where(pick == True)[0]:  # noqa
+            if (info['chs'][k]['kind'] in sel_kind and
+                    info['ch_names'][k] not in selection):
                 pick[k] = False
 
     myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
@@ -270,7 +295,7 @@ def pick_info(info, sel=[], copy=True):
     ----------
     info : dict
         Info structure from evoked or raw data.
-    sel : list of int
+    sel : list of int | None
         Indices of channels to include.
     copy : bool
         If copy is False, info is modified inplace.
@@ -280,15 +305,31 @@ def pick_info(info, sel=[], copy=True):
     res : dict
         Info structure restricted to a selection of channels.
     """
+    info._check_consistency()
     if copy:
         info = deepcopy(info)
-
-    if len(sel) == 0:
-        raise ValueError('Warning : No channels match the selection.')
+    if sel is None:
+        return info
+    elif len(sel) == 0:
+        raise ValueError('No channels match the selection.')
 
     info['chs'] = [info['chs'][k] for k in sel]
     info['ch_names'] = [info['ch_names'][k] for k in sel]
     info['nchan'] = len(sel)
+    info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
+
+    comps = deepcopy(info['comps'])
+    for c in comps:
+        row_idx = [k for k, n in enumerate(c['data']['row_names'])
+                   if n in info['ch_names']]
+        row_names = [c['data']['row_names'][i] for i in row_idx]
+        rowcals = c['rowcals'][row_idx]
+        c['rowcals'] = rowcals
+        c['data']['nrow'] = len(row_names)
+        c['data']['row_names'] = row_names
+        c['data']['data'] = c['data']['data'][row_idx]
+    info['comps'] = comps
+
     return info
 
 
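
A sketch of the new pick_info behavior (raw is assumed to exist):

    import mne

    info3 = mne.pick_info(raw.info, sel=[0, 1, 2])  # keep first 3 channels
    assert info3['nchan'] == 3
    # sel=None now means "keep everything"; compensation channels in
    # info['comps'] are trimmed to match the remaining channel names
    same = mne.pick_info(raw.info, sel=None)
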
@@ -313,9 +354,9 @@ def pick_channels_evoked(orig, include=[], exclude='bads'):
         One evoked dataset.
     include : list of string, (optional)
         List of channels to include (if empty, include all available).
-    exclude : list of string, (optional) | 'bads'
-        Channels to exclude (if empty, do not exclude any).
-         Defaults to 'bads'.
+    exclude : list of string | str
+        List of channels to exclude. If empty do not exclude any (default).
+        If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'.
 
     Returns
     -------
@@ -323,10 +364,11 @@ def pick_channels_evoked(orig, include=[], exclude='bads'):
         Evoked data restricted to selected channels. If include and
         exclude are empty it returns orig without copy.
     """
-
     if len(include) == 0 and len(exclude) == 0:
         return orig
 
+    exclude = _check_excludes_includes(exclude, info=orig.info,
+                                       allow_bads=True)
     sel = pick_channels(orig.info['ch_names'], include=include,
                         exclude=exclude)
 
@@ -346,66 +388,6 @@ def pick_channels_evoked(orig, include=[], exclude='bads'):
     return res
 
 
-def pick_types_evoked(orig, meg=True, eeg=False, stim=False, eog=False,
-                      ecg=False, emg=False, ref_meg=False, misc=False,
-                      resp=False, chpi=False, exci=False, ias=False,
-                      syst=False, include=[], exclude='bads'):
-    """Pick by channel type and names from evoked data
-
-    Parameters
-    ----------
-    info : dict
-        The measurement info
-    meg : bool or string
-        If True include all MEG channels. If False include None
-        If string it can be 'mag' or 'grad' to select only gradiometers
-        or magnetometers.
-    eeg : bool
-        If True include EEG channels
-    eog : bool
-        If True include EOG channels
-    ecg : bool
-        If True include ECG channels
-    emg : bool
-        If True include EMG channels
-    stim : bool
-        If True include stimulus channels
-    ref_meg : bool
-        If True include CTF / 4D reference channels
-    misc : bool
-        If True include miscellaneous analog channels
-    resp : bool
-        If True include response-trigger channel. For some MEG systems this
-        is separate from the stim channel.
-    chpi : bool
-        If True include continuous HPI coil channels.
-    exci : bool
-        Flux excitation channel used to be a stimulus channel.
-    ias : bool
-        Internal Active Shielding data (maybe on Triux only).
-    syst : bool
-        System status channel information (on Triux systems only).
-    include : list of string
-        List of additional channels to include. If empty do not include any.
-    exclude : list of string | str
-        List of channels to exclude. If empty do not exclude any (default).
-        If 'bads', exclude channels in info['bads'].
-
-    Returns
-    -------
-    res : instance of Evoked
-        Evoked data restricted to selected channels. If include and
-        exclude are None it returns orig without copy.
-    """
-    sel = pick_types(info=orig.info, meg=meg, eeg=eeg, stim=stim, eog=eog,
-                     ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc,
-                     resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst,
-                     include=include, exclude=exclude)
-
-    include_ch_names = [orig.ch_names[k] for k in sel]
-    return pick_channels_evoked(orig, include_ch_names)
-
-
 @verbose
 def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
     """Pick channels from forward operator
@@ -414,11 +396,12 @@ def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
     ----------
     orig : dict
         A forward solution.
-    include : list of string (optional) | None
-        List of channels to include (if empty, include all available). Defaults
-        auto None.
-    exclude : list of string (optional) | None
-        Channels to exclude (if empty, do not exclude any). Defaults to None.
+    include : list of string
+        List of channels to include (if empty, include all available).
+        Defaults to [].
+    exclude : list of string | 'bads'
+        Channels to exclude (if empty, do not exclude any). Defaults to [].
+        If 'bads', then exclude bad channels in orig.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -428,49 +411,61 @@ def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
         Forward solution restricted to selected channels. If include and
         exclude are empty it returns orig without copy.
     """
-
+    orig['info']._check_consistency()
     if len(include) == 0 and len(exclude) == 0:
         return orig
+    exclude = _check_excludes_includes(exclude,
+                                       info=orig['info'], allow_bads=True)
 
-    sel = pick_channels(orig['sol']['row_names'], include=include,
-                        exclude=exclude)
+    # Allow for possibility of channel ordering in forward solution being
+    # different from that of the M/EEG file it is based on.
+    sel_sol = pick_channels(orig['sol']['row_names'], include=include,
+                            exclude=exclude)
+    sel_info = pick_channels(orig['info']['ch_names'], include=include,
+                             exclude=exclude)
 
     fwd = deepcopy(orig)
 
+    # Check that forward solution and original data file agree on #channels
+    if len(sel_sol) != len(sel_info):
+        raise ValueError('Forward solution and functional data appear to '
+                         'have different channel names, please check.')
+
     #   Do we have something?
-    nuse = len(sel)
+    nuse = len(sel_sol)
     if nuse == 0:
         raise ValueError('Nothing remains after picking')
 
     logger.info('    %d out of %d channels remain after picking'
                 % (nuse, fwd['nchan']))
 
-    #   Pick the correct rows of the forward operator
-    fwd['sol']['data'] = fwd['sol']['data'][sel, :]
-    fwd['_orig_sol'] = fwd['_orig_sol'][sel, :]
+    #   Pick the correct rows of the forward operator using sel_sol
+    fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
+    fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
     fwd['sol']['nrow'] = nuse
 
-    ch_names = [fwd['sol']['row_names'][k] for k in sel]
+    ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
     fwd['nchan'] = nuse
     fwd['sol']['row_names'] = ch_names
 
-    fwd['info']['ch_names'] = [fwd['info']['ch_names'][k] for k in sel]
-    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel]
+    # Pick the appropriate channel names from the info-dict using sel_info
+    fwd['info']['ch_names'] = [fwd['info']['ch_names'][k] for k in sel_info]
+    fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
     fwd['info']['nchan'] = nuse
     fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
 
     if fwd['sol_grad'] is not None:
-        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel, :]
-        fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel, :]
+        fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
+        fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
         fwd['sol_grad']['nrow'] = nuse
         fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
-                                        for k in sel]
+                                        for k in sel_sol]
 
     return fwd
 
 
-def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, include=[],
-                       exclude=[]):
+def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,
+                       include=[], exclude=[]):
     """Pick by channel type and names from a forward operator
 
     Parameters
@@ -485,6 +480,8 @@ def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, include=[],
         If True include EEG channels
     ref_meg : bool
         If True include CTF / 4D reference channels
+    seeg : bool
+        If True include stereotactic EEG channels
     include : list of string
         List of additional channels to include. If empty do not include any.
     exclude : list of string | str
@@ -497,8 +494,8 @@ def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, include=[],
         Forward solution restricted to selected channel types.
     """
     info = orig['info']
-    sel = pick_types(info, meg, eeg, ref_meg=ref_meg, include=include,
-                     exclude=exclude)
+    sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg,
+                     include=include, exclude=exclude)
     if len(sel) == 0:
         raise ValueError('No valid channels found')
     include_ch_names = [info['ch_names'][k] for k in sel]
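
A minimal sketch of the extended forward picking (fwd is assumed to be a
loaded forward solution):

    import mne

    fwd_seeg = mne.pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
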
@@ -508,7 +505,9 @@ def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, include=[],
 def channel_indices_by_type(info):
     """Get indices of channels by type
     """
-    idx = dict(grad=[], mag=[], eeg=[], eog=[], ecg=[])
+    idx = dict(grad=[], mag=[], eeg=[], seeg=[], eog=[], ecg=[], stim=[],
+               emg=[], ref_meg=[], misc=[], resp=[], chpi=[], exci=[], ias=[],
+               syst=[])
     for k, ch in enumerate(info['chs']):
         for key in idx.keys():
             if channel_type(info, k) == key:
@@ -534,12 +533,91 @@ def pick_channels_cov(orig, include=[], exclude='bads'):
     res : dict
         Covariance solution restricted to selected channels.
     """
+    exclude = orig['bads'] if exclude == 'bads' else exclude
     sel = pick_channels(orig['names'], include=include, exclude=exclude)
     res = deepcopy(orig)
     res['dim'] = len(sel)
-    res['data'] = orig['data'][sel][:, sel]
+    if not res['diag']:
+        res['data'] = orig['data'][sel][:, sel]
+    else:
+        res['data'] = orig['data'][sel]
     res['names'] = [orig['names'][k] for k in sel]
     res['bads'] = [name for name in orig['bads'] if name in res['names']]
     res['eig'] = None
     res['eigvec'] = None
     return res
+
+
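
The diagonal branch added above avoids indexing a 1-D array twice; a plain
numpy sketch of the difference:

    import numpy as np

    full = np.arange(16.).reshape(4, 4)  # regular covariance: 2-D matrix
    diag = np.arange(4.)                 # diagonal covariance stored as 1-D
    sel = [0, 2]
    full[sel][:, sel]  # pick rows, then columns -> shape (2, 2)
    diag[sel]          # one index suffices -> shape (2,)
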
+def _picks_by_type(info, meg_combined=False, ref_meg=False):
+    """Get data channel indices as separate list of tuples
+    Parameters
+    ----------
+    info : instance of mne.io.meas_info.Info
+        The info.
+    meg_combined : bool
+        Whether to return combined picks for grad and mag.
+    ref_meg : bool
+        If True include CTF / 4D reference channels.
+
+    Returns
+    -------
+    picks_list : list of tuples
+        The list of tuples of picks and the type string.
+    """
+    from ..channels.channels import _contains_ch_type
+    picks_list = []
+    has_mag, has_grad, has_eeg = [_contains_ch_type(info, k)
+                                  for k in ('mag', 'grad', 'eeg')]
+    if has_mag and (meg_combined is not True or not has_grad):
+        picks_list.append(
+            ('mag', pick_types(info, meg='mag', eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_grad and (meg_combined is not True or not has_mag):
+        picks_list.append(
+            ('grad', pick_types(info, meg='grad', eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_mag and has_grad and meg_combined is True:
+        picks_list.append(
+            ('meg', pick_types(info, meg=True, eeg=False, stim=False,
+             ref_meg=ref_meg))
+        )
+    if has_eeg:
+        picks_list.append(
+            ('eeg', pick_types(info, meg=False, eeg=True, stim=False,
+             ref_meg=ref_meg))
+        )
+    return picks_list
+
+
+def _check_excludes_includes(chs, info=None, allow_bads=False):
+    """Ensure that inputs to exclude/include are list-like or "bads".
+
+    Parameters
+    ----------
+    chs : list | tuple | ndarray | str
+        The channels passed to include or exclude.
+    allow_bads : bool
+        Allow the user to supply "bads" as a string for auto exclusion.
+
+    Returns
+    -------
+    chs : list
+        Channels to be included/excluded. If allow_bads is True and
+        chs == "bads", this will be the list of bad channels found in info.
+    """
+    from .meas_info import Info
+    if not isinstance(chs, (list, tuple, np.ndarray)):
+        if allow_bads is True:
+            if not isinstance(info, Info):
+                raise ValueError('Supply an info object if allow_bads is true')
+            elif chs != 'bads':
+                raise ValueError('If chs is a string, it must be "bads"')
+            else:
+                chs = info['bads']
+        else:
+            raise ValueError(
+                'include/exclude must be list, tuple, ndarray, or "bads". ' +
+                'You provided type {0}'.format(type(chs)))
+    return chs
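
A usage sketch of the new private helper (raw is assumed to exist; each
entry pairs a channel-type name with the corresponding picks):

    from mne.io.pick import _picks_by_type

    for ch_type, picks in _picks_by_type(raw.info, meg_combined=True):
        print(ch_type, len(picks))
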
diff --git a/mne/io/proc_history.py b/mne/io/proc_history.py
new file mode 100644
index 0000000..50d065f
--- /dev/null
+++ b/mne/io/proc_history.py
@@ -0,0 +1,290 @@
+# -*- coding: utf-8 -*-
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: Simplified BSD
+
+import numpy as np
+from scipy.sparse import csc_matrix
+import warnings
+
+from .open import read_tag
+from .tree import dir_tree_find
+from .write import (start_block, end_block, write_int, write_float,
+                    write_string, write_float_matrix, write_int_matrix,
+                    write_float_sparse_rcs, write_id)
+from .constants import FIFF
+from ..externals.six import text_type
+
+
+_proc_keys = ['parent_file_id', 'block_id', 'parent_block_id',
+              'date', 'experimenter', 'creator']
+_proc_ids = [FIFF.FIFF_PARENT_FILE_ID,
+             FIFF.FIFF_BLOCK_ID,
+             FIFF.FIFF_PARENT_BLOCK_ID,
+             FIFF.FIFF_MEAS_DATE,
+             FIFF.FIFF_EXPERIMENTER,
+             FIFF.FIFF_CREATOR]
+_proc_writers = [write_id, write_id, write_id,
+                 write_int, write_string, write_string]
+_proc_casters = [dict, dict, dict,
+                 np.array, text_type, text_type]
+
+
+def _read_proc_history(fid, tree, info):
+    """Read processing history from fiff file
+
+    This function reads the SSS info, the CTC correction and the
+    calibrations from the SSS processing logs inside a raw file
+    (cf. MaxFilter v2.2 manual (October 2010), page 21):
+
+    104 = {                 900 = proc. history
+      104 = {               901 = proc. record
+        103 = block ID
+        204 = date
+        212 = scientist
+        113 = creator program
+        104 = {             502 = SSS info
+          264 = SSS task
+          263 = SSS coord frame
+          265 = SSS origin
+          266 = SSS ins.order
+          267 = SSS outs.order
+          268 = SSS nr chnls
+          269 = SSS components
+          278 = SSS nfree
+          243 = HPI g limit    0.98
+          244 = HPI dist limit 0.005
+        105 = }             502 = SSS info
+        104 = {             504 = MaxST info
+          264 = SSS task
+          272 = SSST subspace correlation
+          279 = SSST buffer length
+        105 = }
+        104 = {             501 = CTC correction
+          103 = block ID
+          204 = date
+          113 = creator program
+          800 = CTC matrix
+          3417 = proj item chs
+        105 = }             501 = CTC correction
+        104 = {             503 = SSS finecalib.
+          270 = SSS cal chnls
+          271 = SSS cal coeff
+        105 = }             503 = SSS finecalib.
+      105 = }               901 = proc. record
+    105 = }                 900 = proc. history
+    """
+    proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY)
+    out = list()
+    if len(proc_history) > 0:
+        proc_history = proc_history[0]
+        proc_records = dir_tree_find(proc_history,
+                                     FIFF.FIFFB_PROCESSING_RECORD)
+        for proc_record in proc_records:
+            record = dict()
+            for i_ent in range(proc_record['nent']):
+                kind = proc_record['directory'][i_ent].kind
+                pos = proc_record['directory'][i_ent].pos
+                for key, id_, cast in zip(_proc_keys, _proc_ids,
+                                          _proc_casters):
+                    if kind == id_:
+                        tag = read_tag(fid, pos)
+                        record[key] = cast(tag.data)
+                        break
+                else:
+                    warnings.warn('Unknown processing history item %s' % kind)
+            record['max_info'] = _read_maxfilter_record(fid, proc_record)
+            smartshields = dir_tree_find(proc_record,
+                                         FIFF.FIFFB_SMARTSHIELD)
+            if len(smartshields) > 0:
+                # XXX should eventually populate this
+                ss = [dict() for _ in range(len(smartshields))]
+                record['smartshield'] = ss
+            if len(record['max_info']) > 0:
+                out.append(record)
+        if len(proc_records) > 0:
+            info['proc_history'] = out
+
+
+def _write_proc_history(fid, info):
+    """Write processing history to file"""
+    if 'proc_history' not in info:
+        return
+    if len(info['proc_history']) > 0:
+        start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
+        for record in info['proc_history']:
+            start_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
+            for key, id_, writer in zip(_proc_keys, _proc_ids, _proc_writers):
+                if key in record:
+                    writer(fid, id_, record[key])
+            _write_maxfilter_record(fid, record['max_info'])
+            if 'smartshield' in record:
+                for ss in record['smartshield']:
+                    start_block(fid, FIFF.FIFFB_SMARTSHIELD)
+                    # XXX should eventually populate this
+                    end_block(fid, FIFF.FIFFB_SMARTSHIELD)
+            end_block(fid, FIFF.FIFFB_PROCESSING_RECORD)
+        end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY)
+
+
+_sss_info_keys = ('job', 'frame', 'origin', 'in_order',
+                  'out_order', 'nchan', 'components', 'nfree',
+                  'hpi_g_limit', 'hpi_dist_limit')
+_sss_info_ids = (FIFF.FIFF_SSS_JOB,
+                 FIFF.FIFF_SSS_FRAME,
+                 FIFF.FIFF_SSS_ORIGIN,
+                 FIFF.FIFF_SSS_ORD_IN,
+                 FIFF.FIFF_SSS_ORD_OUT,
+                 FIFF.FIFF_SSS_NMAG,
+                 FIFF.FIFF_SSS_COMPONENTS,
+                 FIFF.FIFF_SSS_NFREE,
+                 FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
+                 FIFF.FIFF_HPI_FIT_DIST_LIMIT)
+_sss_info_writers = (write_int, write_int, write_float, write_int,
+                     write_int, write_int, write_int, write_int,
+                     write_float, write_float)
+_sss_info_casters = (int, int, np.array, int,
+                     int, int, np.array, int,
+                     float, float)
+
+_max_st_keys = ('job', 'subspcorr', 'buflen')
+_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR,
+               FIFF.FIFF_SSS_ST_LENGTH)
+_max_st_writers = (write_int, write_float, write_float)
+_max_st_casters = (int, float, float)
+
+_sss_ctc_keys = ('parent_file_id', 'block_id', 'parent_block_id',
+                 'date', 'creator', 'decoupler')
+_sss_ctc_ids = (FIFF.FIFF_PARENT_FILE_ID,
+                FIFF.FIFF_BLOCK_ID,
+                FIFF.FIFF_PARENT_BLOCK_ID,
+                FIFF.FIFF_MEAS_DATE,
+                FIFF.FIFF_CREATOR,
+                FIFF.FIFF_DECOUPLER_MATRIX)
+_sss_ctc_writers = (write_id, write_id, write_id,
+                    write_int, write_string, write_float_sparse_rcs)
+_sss_ctc_casters = (dict, dict, dict,
+                    np.array, text_type, csc_matrix)
+
+_sss_cal_keys = ('cal_chans', 'cal_corrs')
+_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS)
+_sss_cal_writers = (write_int_matrix, write_float_matrix)
+_sss_cal_casters = (np.array, np.array)
+
+
+def _read_maxfilter_record(fid, tree):
+    """Read maxfilter processing record from file"""
+    sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO)  # 502
+    sss_info = dict()
+    if len(sss_info_block) > 0:
+        sss_info_block = sss_info_block[0]
+        for i_ent in range(sss_info_block['nent']):
+            kind = sss_info_block['directory'][i_ent].kind
+            pos = sss_info_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_info_keys, _sss_info_ids,
+                                      _sss_info_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_info[key] = cast(tag.data)
+                    break
+
+    max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO)  # 504
+    max_st = dict()
+    if len(max_st_block) > 0:
+        max_st_block = max_st_block[0]
+        for i_ent in range(max_st_block['nent']):
+            kind = max_st_block['directory'][i_ent].kind
+            pos = max_st_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_max_st_keys, _max_st_ids,
+                                      _max_st_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    max_st[key] = cast(tag.data)
+                    break
+
+    sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER)  # 501
+    sss_ctc = dict()
+    if len(sss_ctc_block) > 0:
+        sss_ctc_block = sss_ctc_block[0]
+        for i_ent in range(sss_ctc_block['nent']):
+            kind = sss_ctc_block['directory'][i_ent].kind
+            pos = sss_ctc_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids,
+                                      _sss_ctc_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_ctc[key] = cast(tag.data)
+                    break
+            else:
+                if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST:
+                    tag = read_tag(fid, pos)
+                    sss_ctc['proj_items_chs'] = tag.data.split(':')
+
+    sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL)  # 503
+    sss_cal = dict()
+    if len(sss_cal_block) > 0:
+        sss_cal_block = sss_cal_block[0]
+        for i_ent in range(sss_cal_block['nent']):
+            kind = sss_cal_block['directory'][i_ent].kind
+            pos = sss_cal_block['directory'][i_ent].pos
+            for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids,
+                                      _sss_cal_casters):
+                if kind == id_:
+                    tag = read_tag(fid, pos)
+                    sss_cal[key] = cast(tag.data)
+                    break
+
+    max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc,
+                    sss_cal=sss_cal, max_st=max_st)
+    return max_info
+
+
+def _write_maxfilter_record(fid, record):
+    """Write maxfilter processing record to file"""
+    sss_info = record['sss_info']
+    if len(sss_info) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_INFO)
+        for key, id_, writer in zip(_sss_info_keys, _sss_info_ids,
+                                    _sss_info_writers):
+            if key in sss_info:
+                writer(fid, id_, sss_info[key])
+        end_block(fid, FIFF.FIFFB_SSS_INFO)
+
+    max_st = record['max_st']
+    if len(max_st) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_ST_INFO)
+        for key, id_, writer in zip(_max_st_keys, _max_st_ids,
+                                    _max_st_writers):
+            if key in max_st:
+                writer(fid, id_, max_st[key])
+        end_block(fid, FIFF.FIFFB_SSS_ST_INFO)
+
+    sss_ctc = record['sss_ctc']
+    if len(sss_ctc) > 0:  # dict has entries
+        start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
+        for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids,
+                                    _sss_ctc_writers):
+            if key in sss_ctc:
+                writer(fid, id_, sss_ctc[key])
+        if 'proj_items_chs' in sss_ctc:
+            write_string(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
+                         ':'.join(sss_ctc['proj_items_chs']))
+        end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER)
+
+    sss_cal = record['sss_cal']
+    if len(sss_cal) > 0:
+        start_block(fid, FIFF.FIFFB_SSS_CAL)
+        for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids,
+                                    _sss_cal_writers):
+            if key in sss_cal:
+                writer(fid, id_, sss_cal[key])
+        end_block(fid, FIFF.FIFFB_SSS_CAL)
+
+
+def _get_sss_rank(sss):
+    """Get SSS rank"""
+    inside = sss['sss_info']['in_order']
+    nfree = (inside + 1) ** 2 - 1
+    nfree -= (len(sss['sss_info']['components'][:nfree]) -
+              sss['sss_info']['components'][:nfree].sum())
+    return nfree
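
A worked sketch of the rank computation (values are illustrative): with
in_order=8 there are (8 + 1) ** 2 - 1 = 80 internal moments, and disabled
components are subtracted from that count.

    import numpy as np
    from mne.io.proc_history import _get_sss_rank

    sss = dict(sss_info=dict(in_order=8, components=np.ones(80, dtype=int)))
    assert _get_sss_rank(sss) == 80  # all moments enabled
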
diff --git a/mne/io/proj.py b/mne/io/proj.py
index dd92f03..0ab52e2 100644
--- a/mne/io/proj.py
+++ b/mne/io/proj.py
@@ -1,6 +1,7 @@
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
 #          Denis Engemann <denis.engemann at gmail.com>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -9,12 +10,16 @@ from math import sqrt
 import numpy as np
 from scipy import linalg
 from itertools import count
+import warnings
 
 from .tree import dir_tree_find
 from .tag import find_tag
 from .constants import FIFF
 from .pick import pick_types
+from .write import (write_int, write_float, write_string, write_name_list,
+                    write_float_matrix, end_block, start_block)
 from ..utils import logger, verbose
+from ..externals.six import string_types
 
 
 class Projection(dict):
@@ -31,7 +36,34 @@ class Projection(dict):
 
 class ProjMixin(object):
     """Mixin class for Raw, Evoked, Epochs
+
+    Notes
+    -----
+    This mixin adds a proj attribute as a property to data containers.
+    It is True if at least one proj is present and all of them are active.
+    The projs might not be applied yet if the data are not preloaded. In
+    that case it is the _projector attribute that does the job.
+    If a private _data attribute is present, then the projs applied
+    to it are the ones marked as active.
+
+    A proj parameter passed in the constructor of Raw or Epochs calls
+    apply_proj, so afterwards the .proj attribute is True.
+
+    As soon as you have applied the projs, they stay active for the
+    rest of the pipeline.
+
+    The suggested pipeline is proj=True in Epochs (it is cheaper than
+    for Raw).
+
+    When you use delayed SSP in Epochs, projs are applied when the
+    get_data() method is called. They are not applied to evoked._data
+    unless you call apply_proj(). The reason is that you may want to
+    reject epochs with the projs applied even though the data are not
+    stored with the projections applied.
     """
+    @property
+    def proj(self):
+        return (len(self.info['projs']) > 0 and
+                all(p['active'] for p in self.info['projs']))
+
     def add_proj(self, projs, remove_existing=False):
         """Add SSP projection vectors
 
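
A sketch of the new proj property together with add_proj/apply_proj (raw is
assumed to start without projections; projs is a list of inactive
Projection objects):

    raw.add_proj(projs)        # appended to raw.info['projs'], not applied
    assert raw.proj is False   # present but not all active
    raw.apply_proj()
    assert raw.proj is True    # all projs active now
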
@@ -51,7 +83,7 @@ class ProjMixin(object):
             projs = [projs]
 
         if (not isinstance(projs, list) and
-                not all([isinstance(p, Projection) for p in projs])):
+                not all(isinstance(p, Projection) for p in projs)):
             raise ValueError('Only projs can be added. You supplied '
                              'something else.')
 
@@ -96,14 +128,21 @@ class ProjMixin(object):
         self : instance of Raw | Epochs | Evoked
             The instance.
         """
-        if self.info['projs'] is None:
+        from ..epochs import _BaseEpochs
+        from .base import _BaseRaw
+        if self.info['projs'] is None or len(self.info['projs']) == 0:
             logger.info('No projector specified for this dataset. '
                         'Please consider the method self.add_proj.')
             return self
 
-        if all([p['active'] for p in self.info['projs']]):
-            logger.info('Projections have already been applied. Doing '
-                        'nothing.')
+        # Exit delayed mode if you apply proj
+        if isinstance(self, _BaseEpochs) and self._do_delayed_proj:
+            logger.info('Leaving delayed SSP mode.')
+            self._do_delayed_proj = False
+
+        if all(p['active'] for p in self.info['projs']):
+            logger.info('Projections have already been applied. '
+                        'Setting proj attribute to True.')
             return self
 
         _projector, info = setup_proj(deepcopy(self.info), activate=True,
@@ -115,30 +154,18 @@ class ProjMixin(object):
             return self
 
         self._projector, self.info = _projector, info
-        self.proj = True  # track that proj were applied
-        # handle different data / preload attrs and create reference
-        # this also helps avoiding circular imports
-        for attr in ('get_data', '_data', 'data'):
-            data = getattr(self, attr, None)
-            if data is None:
-                continue
-            elif callable(data):
-                if self.preload:
-                    data = np.empty_like(self._data)
-                    for ii, e in enumerate(self._data):
-                        data[ii] = self._preprocess(np.dot(self._projector, e),
-                                                    self.verbose)
-                else:  # get data knows what to do.
-                    data = data()
+        if isinstance(self, _BaseRaw):
+            if self.preload:
+                self._data = np.dot(self._projector, self._data)
+        elif isinstance(self, _BaseEpochs):
+            if self.preload:
+                for ii, e in enumerate(self._data):
+                    self._data[ii] = self._project_epoch(e)
             else:
-                data = np.dot(self._projector, data)
-            break
+                self.load_data()  # will automatically apply
+        else:  # Evoked
+            self.data = np.dot(self._projector, self.data)
         logger.info('SSP projectors applied...')
-        if hasattr(self, '_data'):
-            self._data = data
-        else:
-            self.data = data
-
         return self
 
     def del_proj(self, idx):
@@ -164,8 +191,56 @@ class ProjMixin(object):
 
         return self
 
+    def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
+        """Plot SSP vector
+
+        Parameters
+        ----------
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
+            The channel type to plot. For 'grad', the gradiometers are
+            collected in pairs and the RMS for each pair is plotted. If None
+            (default), it will return all channel types present. If a list of
+            ch_types is provided, it will return multiple figures.
+        layout : None | Layout | List of Layouts
+            Layout instance specifying sensor positions (does not need to
+            be specified for Neuromag data). If possible, the correct
+            layout file is inferred from the data; if no appropriate layout
+            file was found, the layout is automatically generated from the
+            sensor locations. A list of Layouts can be given if the
+            projections come from different sensor types.
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of projectors. If instance of Axes,
+            there must be only one projector. Defaults to None.
 
-def proj_equal(a, b):
+        Returns
+        -------
+        fig : instance of matplotlib figure
+            Figure distributing one image per channel across sensor topography.
+        """
+        if self.info['projs'] is not None and len(self.info['projs']) != 0:
+            from ..viz.topomap import plot_projs_topomap
+            from ..channels.layout import find_layout
+            if layout is None:
+                layout = []
+                if ch_type is None:
+                    ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
+                elif isinstance(ch_type, string_types):
+                    ch_type = [ch_type]
+                for ch in ch_type:
+                    if ch in self:
+                        layout.append(find_layout(self.info, ch, exclude=[]))
+                    else:
+                        err = 'Channel type %s is not found in info.' % ch
+                        warnings.warn(err)
+            fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
+        else:
+            raise ValueError("Info is missing projs. Nothing to plot.")
+
+        return fig
+
+
+def _proj_equal(a, b):
     """ Test if two projectors are equal """
 
     equal = (a['active'] == b['active'] and
@@ -294,13 +369,10 @@ def _read_proj(fid, node, verbose=None):
 
     return projs
 
+
 ###############################################################################
 # Write
 
-from .write import (write_int, write_float, write_string, write_name_list,
-                    write_float_matrix, end_block, start_block)
-
-
 def _write_proj(fid, projs):
     """Write a projection operator to a file.
 
@@ -403,8 +475,8 @@ def make_projector(projs, ch_names, bads=[], include_active=True):
 
             # If there is something to pick, pickit
             if len(sel) > 0:
-                for v in range(p['data']['nrow']):
-                    vecs[sel, nvec + v] = p['data']['data'][v, vecsel].T
+                nrow = p['data']['nrow']
+                vecs[sel, nvec:nvec + nrow] = p['data']['data'][:, vecsel].T
 
             # Rescale for better detection of small singular values
             for v in range(p['data']['nrow']):
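
The change above replaces a per-row Python loop with a single vectorized
assignment; a plain numpy sketch of the equivalence:

    import numpy as np

    vecs = np.zeros((5, 3))
    data = np.arange(6.).reshape(2, 3)  # stands in for p['data']['data']
    sel, vecsel, nvec = [0, 2, 4], [0, 1, 2], 0
    vecs[sel, nvec:nvec + 2] = data[:, vecsel].T  # all rows at once
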
@@ -538,6 +610,11 @@ def make_eeg_average_ref_proj(info, activate=True, verbose=None):
     eeg_proj: instance of Projection
         The SSP/PCA projector.
     """
+    if info.get('custom_ref_applied', False):
+        raise RuntimeError('Cannot add an average EEG reference projection '
+                           'since a custom reference has been applied to the '
+                           'data earlier.')
+
     logger.info("Adding average EEG reference projection.")
     eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
                          exclude='bads')
@@ -559,12 +636,25 @@ def make_eeg_average_ref_proj(info, activate=True, verbose=None):
 def _has_eeg_average_ref_proj(projs):
     """Determine if a list of projectors has an average EEG ref"""
     for proj in projs:
-        if proj['desc'] == 'Average EEG reference' or \
-                proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF:
+        if (proj['desc'] == 'Average EEG reference' or
+                proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
             return True
     return False
 
 
+def _needs_eeg_average_ref_proj(info):
+    """Determine if the EEG needs an averge EEG reference
+
+    This returns True if no custom reference has been applied and no average
+    reference projection is present in the list of projections.
+    """
+    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
+    return (len(eeg_sel) > 0 and
+            not info['custom_ref_applied'] and
+            not _has_eeg_average_ref_proj(info['projs']))
+
+
 @verbose
 def setup_proj(info, add_eeg_ref=True, activate=True,
                verbose=None):
@@ -590,14 +680,11 @@ def setup_proj(info, add_eeg_ref=True, activate=True,
         The modified measurement info (Warning: info is modified inplace).
     """
     # Add EEG ref reference proj if necessary
-    eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
-                         exclude='bads')
-    if len(eeg_sel) > 0 and not _has_eeg_average_ref_proj(info['projs']) \
-            and add_eeg_ref is True:
+    if _needs_eeg_average_ref_proj(info) and add_eeg_ref:
         eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
         info['projs'].append(eeg_proj)
 
-    #   Create the projector
+    # Create the projector
     projector, nproj = make_projector_info(info)
     if nproj == 0:
         if verbose:
@@ -608,7 +695,7 @@ def setup_proj(info, add_eeg_ref=True, activate=True,
         logger.info('Created an SSP operator (subspace dimension = %d)'
                     % nproj)
 
-    #   The projection items have been activated
+    # The projection items have been activated
     if activate:
         info['projs'] = activate_proj(info['projs'], copy=False)
 
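
A sketch of the refactored check (raw is assumed to exist; the helper is
private):

    from mne.io.proj import _needs_eeg_average_ref_proj

    # True only when EEG channels are present, no custom reference has been
    # applied, and no average-reference projection exists yet
    needs_ref = _needs_eeg_average_ref_proj(raw.info)
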
@@ -619,7 +706,7 @@ def _uniquify_projs(projs):
     """Aux function"""
     final_projs = []
     for proj in projs:  # flatten
-        if not any([proj_equal(p, proj) for p in final_projs]):
+        if not any(_proj_equal(p, proj) for p in final_projs):
             final_projs.append(proj)
 
     my_count = count(len(final_projs))
diff --git a/mne/io/reference.py b/mne/io/reference.py
new file mode 100644
index 0000000..1fc0455
--- /dev/null
+++ b/mne/io/reference.py
@@ -0,0 +1,387 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+
+from .constants import FIFF
+from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj
+from .pick import pick_types
+from .base import _BaseRaw
+from ..evoked import Evoked
+from ..epochs import Epochs
+from ..utils import logger
+
+
+def _apply_reference(inst, ref_from, ref_to=None, copy=True):
+    """Apply a custom EEG referencing scheme.
+
+    Calculates a reference signal by taking the mean of a set of channels and
+    applies the reference to another set of channels. Input data can be in the
+    form of Raw, Epochs or Evoked.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Data containing the EEG channels and reference channel(s).
+    ref_from : list of str
+        The names of the channels to use to construct the reference. If an
+        empty list is specified, the data is assumed to already have a proper
+        reference and MNE will not attempt any re-referencing of the data.
+    ref_to : list of str | None
+        The names of the channels to apply the reference to. By default,
+        all EEG channels are chosen.
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        The data with EEG channels rereferenced.
+    ref_data : array, shape (n_times,)
+        Array of reference data subtracted from EEG channels.
+
+    Notes
+    -----
+    1. Do not use this function to apply an average reference. By default, an
+       average reference projection has already been added upon loading raw
+       data.
+
+    2. If the reference is applied to any EEG channels, this function removes
+       any pre-existing average reference projections.
+
+    3. During source localization, the EEG signal should have an average
+       reference.
+
+    4. The data must be preloaded.
+
+    See Also
+    --------
+    set_eeg_reference : Convenience function for creating an EEG reference.
+    set_bipolar_reference : Convenience function for creating a bipolar
+                            reference.
+    """
+    # Check to see that data is preloaded
+    if not isinstance(inst, Evoked) and not inst.preload:
+        raise RuntimeError('Data needs to be preloaded. Use '
+                           'preload=True (or string) in the constructor.')
+
+    eeg_idx = pick_types(inst.info, eeg=True, meg=False, ref_meg=False)
+
+    if ref_to is None:
+        ref_to = [inst.ch_names[i] for i in eeg_idx]
+
+    if copy:
+        inst = inst.copy()
+
+    # After referencing, existing SSPs might not be valid anymore.
+    for i, proj in enumerate(inst.info['projs']):
+        if (not proj['active'] and
+            len([ch for ch in (ref_from + ref_to)
+                 if ch in proj['data']['col_names']]) > 0):
+
+            # Remove any average reference projections, apply any other types
+            if proj['desc'] == 'Average EEG reference' or \
+                    proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF:
+                logger.info('Removing existing average EEG reference '
+                            'projection.')
+                del inst.info['projs'][i]
+            else:
+                logger.info(
+                    'Inactive signal space projection (SSP) operators are '
+                    'present that operate on sensors involved in the current '
+                    'referencing scheme. Applying them now. Be aware that '
+                    'after re-referencing, these operators will be invalid.')
+                inst.apply_proj()
+            break
+
+    ref_from = [inst.ch_names.index(ch) for ch in ref_from]
+    ref_to = [inst.ch_names.index(ch) for ch in ref_to]
+
+    if isinstance(inst, Evoked):
+        data = inst.data
+    else:
+        data = inst._data
+
+    # Compute reference
+    if len(ref_from) > 0:
+        ref_data = data[..., ref_from, :].mean(-2)
+
+        if isinstance(inst, Epochs):
+            data[:, ref_to, :] -= ref_data[:, np.newaxis, :]
+        else:
+            data[ref_to] -= ref_data
+    else:
+        ref_data = None
+
+    # If the reference touches EEG electrodes, note in the info that a non-CAR
+    # has been applied.
+    if len(np.intersect1d(ref_to, eeg_idx)) > 0:
+        inst.info['custom_ref_applied'] = True
+
+    return inst, ref_data
+
+
+def add_reference_channels(inst, ref_channels, copy=True):
+    """Add reference channels to data that consists of all zeros.
+
+    Adds reference channels to data that were not included during recording.
+    This is useful when you need to re-reference your data to different
+    channel. These added channels will consist of all zeros.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Instance of Raw or Epochs with EEG channels and reference channel(s).
+    ref_channels : str | list of str
+        Name of the electrode(s) which served as the reference in the
+        recording. If a name is provided, a corresponding channel is added
+        and its data is set to 0. This is useful for later re-referencing.
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with added EEG reference channels.
+    """
+    # Check to see that data is preloaded
+    if not isinstance(inst, Evoked) and not inst.preload:
+        raise RuntimeError('Data needs to be preloaded.')
+    if isinstance(ref_channels, str):
+        ref_channels = [ref_channels]
+    elif not isinstance(ref_channels, list):
+        raise ValueError("`ref_channels` should be either str or list of str. "
+                         "%s was provided." % type(ref_channels))
+    for ch in ref_channels:
+        if ch in inst.info['ch_names']:
+            raise ValueError("Channel %s already specified in inst." % ch)
+
+    if copy:
+        inst = inst.copy()
+
+    if isinstance(inst, Evoked):
+        data = inst.data
+        refs = np.zeros((len(ref_channels), data.shape[1]))
+        data = np.vstack((data, refs))
+        inst.data = data
+    elif isinstance(inst, _BaseRaw):
+        data = inst._data
+        refs = np.zeros((len(ref_channels), data.shape[1]))
+        data = np.vstack((data, refs))
+        inst._data = data
+    elif isinstance(inst, Epochs):
+        data = inst._data
+        x, y, z = data.shape
+        refs = np.zeros((x * len(ref_channels), z))
+        data = np.vstack((data.reshape((x * y, z), order='F'), refs))
+        data = data.reshape(x, y + len(ref_channels), z, order='F')
+        inst._data = data
+    else:
+        raise TypeError("inst should be Raw, Epochs, or Evoked instead of %s."
+                        % type(inst))
+    nchan = len(inst.info['ch_names'])
+    for ch in ref_channels:
+        chan_info = {'ch_name': ch,
+                     'coil_type': FIFF.FIFFV_COIL_EEG,
+                     'kind': FIFF.FIFFV_EEG_CH,
+                     'logno': nchan + 1,
+                     'scanno': nchan + 1,
+                     'cal': 1,
+                     'range': 1.,
+                     'unit_mul': 0.,
+                     'unit': FIFF.FIFF_UNIT_V,
+                     'coord_frame': FIFF.FIFFV_COORD_HEAD,
+                     'loc': np.zeros(12)}
+        inst.info['chs'].append(chan_info)
+    inst.info['ch_names'].extend(ref_channels)
+    inst.info['nchan'] = len(inst.info['ch_names'])
+    if isinstance(inst, _BaseRaw):
+        inst._cals = np.hstack((inst._cals, [1] * len(ref_channels)))
+
+    return inst
+
+
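
A minimal usage sketch (raw is assumed to exist; 'Cz' is a hypothetical
online-reference electrode name):

    from mne.io.reference import add_reference_channels

    raw = add_reference_channels(raw, 'Cz')  # appends an all-zero channel
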
+def set_eeg_reference(inst, ref_channels=None, copy=True):
+    """Rereference EEG channels to new reference channel(s).
+
+    If multiple reference channels are specified, they will be averaged. If
+    no reference channels are specified, an average reference will be applied.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Instance of Raw or Epochs with EEG channels and reference channel(s).
+    ref_channels : list of str | None
+        The names of the channels to use to construct the reference. If None is
+        specified here, an average reference will be applied in the form of an
+        SSP projector. If an empty list is specified, the data is assumed to
+        already have a proper reference and MNE will not attempt any
+        re-referencing of the data. Defaults to an average reference (None).
+    copy : bool
+        Specifies whether the data will be copied (True) or modified in place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with EEG channels re-referenced.
+    ref_data : array
+        Array of reference data subtracted from EEG channels.
+
+    Notes
+    -----
+    1. If a reference is requested that is not the average reference, this
+       function removes any pre-existing average reference projections.
+
+    2. During source localization, the EEG signal should have an average
+       reference.
+
+    3. In order to apply a reference other than an average reference, the data
+       must be preloaded.
+
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    set_bipolar_reference : Convenience function for creating bipolar
+                            references.
+    """
+    if ref_channels is None:
+        # CAR requested
+        if _has_eeg_average_ref_proj(inst.info['projs']):
+            logger.warning('An average reference projection was already '
+                           'added. The data has been left untouched.')
+            return inst, None
+        else:
+            inst.info['custom_ref_applied'] = False
+            inst.add_proj(make_eeg_average_ref_proj(inst.info, activate=False))
+            return inst, None
+    else:
+        logger.info('Applying a custom EEG reference.')
+        return _apply_reference(inst, ref_channels, copy=copy)
+
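
A usage sketch (raw is assumed to exist; 'M1'/'M2' are hypothetical mastoid
channel names):

    from mne.io.reference import set_eeg_reference

    raw_car, _ = set_eeg_reference(raw)                  # average ref (SSP)
    raw_ref, ref = set_eeg_reference(raw, ['M1', 'M2'])  # custom reference
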
+
+def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None,
+                          copy=True):
+    """Rereference selected channels using a bipolar referencing scheme.
+
+    A bipolar reference takes the difference between two channels (the anode
+    minus the cathode) and adds it as a new virtual channel. The original
+    channels will be dropped.
+
+    Multiple anodes and cathodes can be specified, in which case multiple
+    virtual channels will be created. The 1st cathode will be subtracted
+    from the 1st anode, the 2nd cathode from the 2nd anode, etc.
+
+    By default, the virtual channels will be annotated with channel info of
+    the anodes, their locations set to (0, 0, 0) and coil types set to
+    EEG_BIPOLAR.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Data containing the unreferenced channels.
+    anode : str | list of str
+        The name(s) of the channel(s) to use as anode in the bipolar reference.
+    cathode : str | list of str
+        The name(s) of the channel(s) to use as cathode in the bipolar
+        reference.
+    ch_name : str | list of str | None
+        The channel name(s) for the virtual channel(s) containing the resulting
+        signal. By default, bipolar channels are named after the anode and
+        cathode, but it is recommended to supply a more meaningful name.
+    ch_info : dict | list of dict | None
+        This parameter can be used to supply a dictionary (or a list of
+        dictionaries, one per bipolar channel) containing channel
+        information to merge in, overwriting the default values. Defaults
+        to None.
+    copy : bool
+        Whether to operate on a copy of the data (True) or modify it in-place
+        (False). Defaults to True.
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with the specified channels re-referenced.
+
+    Notes
+    -----
+    1. If the anodes contain any EEG channels, this function removes
+       any pre-existing average reference projections.
+
+    2. During source localization, the EEG signal should have an average
+       reference.
+
+    3. The data must be preloaded.
+
+    .. versionadded:: 0.9.0
+
+    See Also
+    --------
+    set_eeg_reference : Convenience function for creating an EEG reference.
+    """
+    if not isinstance(anode, list):
+        anode = [anode]
+
+    if not isinstance(cathode, list):
+        cathode = [cathode]
+
+    if len(anode) != len(cathode):
+        raise ValueError('Number of anodes must equal the number of cathodes.')
+
+    if ch_name is None:
+        ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)]
+    elif not isinstance(ch_name, list):
+        ch_name = [ch_name]
+    if len(ch_name) != len(anode):
+        raise ValueError('Number of channel names must equal the number of '
+                         'anodes/cathodes.')
+
+    # Check for duplicate channel names (it is allowed to give the name of the
+    # anode or cathode channel, as they will be replaced).
+    for ch, a, c in zip(ch_name, anode, cathode):
+        if ch not in [a, c] and ch in inst.ch_names:
+            raise ValueError('There is already a channel named "%s", please '
+                             'specify a different name for the bipolar '
+                             'channel using the ch_name parameter.' % ch)
+
+    if ch_info is None:
+        ch_info = [{} for an in anode]
+    elif not isinstance(ch_info, list):
+        ch_info = [ch_info]
+    if len(ch_info) != len(anode):
+        raise ValueError('Number of channel info dictionaries must equal the '
+                         'number of anodes/cathodes.')
+
+    # Merge specified and anode channel information dictionaries
+    new_ch_info = []
+    for an, ci in zip(anode, ch_info):
+        new_info = inst.info['chs'][inst.ch_names.index(an)].copy()
+
+        # Set channel location and coil type
+        new_info['loc'] = np.zeros(12)
+        new_info['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
+
+        new_info.update(ci)
+        new_ch_info.append(new_info)
+
+    if copy:
+        inst = inst.copy()
+
+    # Perform bipolar referencing
+    for an, ca, name, info in zip(anode, cathode, ch_name, new_ch_info):
+        inst, _ = _apply_reference(inst, [ca], [an], copy=False)
+        an_idx = inst.ch_names.index(an)
+        inst.info['chs'][an_idx] = info
+        inst.info['chs'][an_idx]['ch_name'] = name
+        inst.info['ch_names'][an_idx] = name
+        logger.info('Bipolar channel added as "%s".' % name)
+
+    # Drop cathode channels
+    inst.drop_channels(cathode)
+
+    return inst
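Taken together, the two entry points compose as in the following usage
sketch (the file name is a placeholder; any preloaded Raw object with EEG
channels works):

    from mne.io import Raw, set_eeg_reference, set_bipolar_reference

    raw = Raw('raw.fif', preload=True)  # placeholder path

    # average reference: added as an inactive projection, data untouched
    raw_avg, _ = set_eeg_reference(raw, copy=True)

    # custom reference: mean of the listed channels is subtracted
    raw_cus, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                          copy=True)

    # bipolar pair: 'EEG 001' minus 'EEG 002' becomes one virtual channel
    raw_bip = set_bipolar_reference(raw, 'EEG 001', 'EEG 002',
                                    ch_name='bipolar')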
diff --git a/mne/io/tag.py b/mne/io/tag.py
index 8b983da..1f95733 100644
--- a/mne/io/tag.py
+++ b/mne/io/tag.py
@@ -3,11 +3,9 @@
 #
 # License: BSD (3-clause)
 
-import struct
 import os
 import gzip
 import numpy as np
-from scipy import linalg
 
 from .constants import FIFF
 
@@ -103,8 +101,8 @@ def read_big(fid, size=None):
         >>> fid_gz = gzip.open(fname_gz, 'rb')
         >>> y = np.fromstring(read_big(fid_gz))
         >>> assert np.all(x == y)
-        >>> shutil.rmtree(os.path.dirname(fname))
         >>> fid_gz.close()
+        >>> shutil.rmtree(os.path.dirname(fname))
 
     """
     # buf_size is chosen as a largest working power of 2 (16 MB):
@@ -142,7 +140,7 @@ def read_tag_info(fid):
     s = fid.read(4 * 4)
     if len(s) == 0:
         return None
-    tag = Tag(*struct.unpack(">iiii", s))
+    tag = Tag(*np.fromstring(s, '>i4'))
     if tag.next == 0:
         fid.seek(tag.size, 1)
     elif tag.next > 0:
@@ -156,8 +154,11 @@ def _fromstring_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
         item_size = np.dtype(dtype).itemsize
         if not len(shape) == 2:
             raise ValueError('Only implemented for 2D matrices')
-        if not np.prod(shape) == tag_size / item_size:
-            raise ValueError('Wrong shape specified')
+        want_shape = np.prod(shape)
+        have_shape = tag_size // item_size
+        if want_shape != have_shape:
+            raise ValueError('Wrong shape specified, requested %s but got %s'
+                             % (want_shape, have_shape))
         if not len(rlims) == 2:
             raise ValueError('rlims must have two elements')
         n_row_out = rlims[1] - rlims[0]
@@ -179,7 +180,7 @@ def _fromstring_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
     return out
 
 
-def _loc_to_trans(loc):
+def _loc_to_coil_trans(loc):
     """Helper to convert loc vector to coil_trans"""
     # deal with nasty OSX Anaconda bug by casting to float64
     loc = loc.astype(np.float64)
@@ -188,6 +189,20 @@ def _loc_to_trans(loc):
     return coil_trans
 
 
+def _coil_trans_to_loc(coil_trans):
+    """Helper to convert coil_trans to loc"""
+    coil_trans = coil_trans.astype(np.float64)
+    return np.roll(coil_trans.T[:, :3], 1, 0).flatten()
+
+
+def _loc_to_eeg_loc(loc):
+    """Helper to convert a loc to an EEG loc"""
+    if loc[3:6].any():
+        return np.array([loc[0:3], loc[3:6]]).T
+    else:
+        return loc[0:3][:, np.newaxis].copy()
+
+
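The two coil_trans helpers above are mutual inverses; a minimal sketch of
the round trip (the tests added to test_meas_info.py in this commit check
exactly this):

    import numpy as np

    rng = np.random.RandomState(0)
    trans = rng.randn(4, 4)
    trans[3] = [0, 0, 0, 1]          # valid affine: fixed last row
    loc = _coil_trans_to_loc(trans)  # 12-element loc vector
    assert np.allclose(_loc_to_coil_trans(loc), trans)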
 def read_tag(fid, pos=None, shape=None, rlims=None):
     """Read a Tag from a file at a given position
 
@@ -201,9 +216,10 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
         If tuple, the shape of the stored matrix. Only to be used with
         data stored as a vector (not implemented for matrices yet).
     rlims : tuple | None
-        If tuple, the first and last rows to retrieve. Note that data are
-        assumed to be stored row-major in the file. Only to be used with
-        data stored as a vector (not implemented for matrices yet).
+        If tuple, the first (inclusive) and last (exclusive) rows to retrieve.
+        Note that data are assumed to be stored row-major in the file. Only to
+        be used with data stored as a vector (not implemented for matrices
+        yet).
 
     Returns
     -------
@@ -214,7 +230,8 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
         fid.seek(pos, 0)
 
     s = fid.read(4 * 4)
-    tag = Tag(*struct.unpack(">iIii", s))
+
+    tag = Tag(*np.fromstring(s, dtype='>i4,>u4,>i4,>i4')[0])
 
     #
     #   The magic hexadecimal values
@@ -302,11 +319,20 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 shape = (dims[1], dims[2])
                 if matrix_coding == matrix_coding_CCS:
                     #    CCS
-                    sparse.csc_matrix()
-                    sparse_indices = np.fromstring(fid.read(4 * nnz),
-                                                   dtype='>i4')
-                    sparse_ptrs = np.fromstring(fid.read(4 * (ncol + 1)),
-                                                dtype='>i4')
+                    tmp_indices = fid.read(4 * nnz)
+                    sparse_indices = np.fromstring(tmp_indices, dtype='>i4')
+                    tmp_ptrs = fid.read(4 * (ncol + 1))
+                    sparse_ptrs = np.fromstring(tmp_ptrs, dtype='>i4')
+                    if (sparse_ptrs[-1] > len(sparse_indices) or
+                            np.any(sparse_ptrs < 0)):
+                        # There was a bug in MNE-C that caused some data to be
+                        # stored without byte swapping
+                        sparse_indices = np.concatenate(
+                            (np.fromstring(tmp_indices[:4 * (nrow + 1)],
+                                           dtype='>i4'),
+                             np.fromstring(tmp_indices[4 * (nrow + 1):],
+                                           dtype='<i4')))
+                        sparse_ptrs = np.fromstring(tmp_ptrs, dtype='<i4')
                     tag.data = sparse.csc_matrix((sparse_data, sparse_indices,
                                                  sparse_ptrs), shape=shape)
                 else:
@@ -391,20 +417,18 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 tag.data['r'] = np.fromstring(fid.read(12), dtype=">f4")
                 tag.data['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
             elif tag.type == FIFF.FIFFT_COORD_TRANS_STRUCT:
-                tag.data = dict()
-                tag.data['from'] = int(np.fromstring(fid.read(4), dtype=">i4"))
-                tag.data['to'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                from ..transforms import Transform
+                fro = int(np.fromstring(fid.read(4), dtype=">i4"))
+                to = int(np.fromstring(fid.read(4), dtype=">i4"))
                 rot = np.fromstring(fid.read(36), dtype=">f4").reshape(3, 3)
                 move = np.fromstring(fid.read(12), dtype=">f4")
-                tag.data['trans'] = np.r_[np.c_[rot, move],
-                                          np.array([[0], [0], [0], [1]]).T]
-                #
+                trans = np.r_[np.c_[rot, move],
+                              np.array([[0], [0], [0], [1]]).T]
+                tag.data = Transform(fro, to, trans)
                 # Skip over the inverse transformation
-                # It is easier to just use inverse of trans in Matlab
-                #
                 fid.seek(12 * 4, 1)
             elif tag.type == FIFF.FIFFT_CH_INFO_STRUCT:
-                d = dict()
+                d = tag.data = dict()
                 d['scanno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
                 d['logno'] = int(np.fromstring(fid.read(4), dtype=">i4"))
                 d['kind'] = int(np.fromstring(fid.read(4), dtype=">i4"))
@@ -415,40 +439,29 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 #   Read the coil coordinate system definition
                 #
                 d['loc'] = np.fromstring(fid.read(48), dtype=">f4")
-                d['coil_trans'] = None
-                d['eeg_loc'] = None
-                d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
-                tag.data = d
+                # deal with nasty OSX Anaconda bug by casting to float64
+                d['loc'] = d['loc'].astype(np.float64)
                 #
                 #   Convert loc into a more useful format
                 #
-                loc = tag.data['loc']
-                kind = tag.data['kind']
+                kind = d['kind']
                 if kind in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
-                    tag.data['coil_trans'] = _loc_to_trans(loc)
-                    tag.data['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
-                elif tag.data['kind'] == FIFF.FIFFV_EEG_CH:
-                    # deal with nasty OSX Anaconda bug by casting to float64
-                    loc = loc.astype(np.float64)
-                    if linalg.norm(loc[3:6]) > 0.:
-                        tag.data['eeg_loc'] = np.c_[loc[0:3], loc[3:6]]
-                    else:
-                        tag.data['eeg_loc'] = loc[0:3][:, np.newaxis].copy()
-                    tag.data['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                    d['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+                elif d['kind'] == FIFF.FIFFV_EEG_CH:
+                    d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+                else:
+                    d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
                 #
                 #   Unit and exponent
                 #
-                tag.data['unit'] = int(np.fromstring(fid.read(4), dtype=">i4"))
-                tag.data['unit_mul'] = int(np.fromstring(fid.read(4),
-                                                         dtype=">i4"))
+                d['unit'] = int(np.fromstring(fid.read(4), dtype=">i4"))
+                d['unit_mul'] = int(np.fromstring(fid.read(4), dtype=">i4"))
                 #
                 #   Handle the channel name
                 #
                 ch_name = np.fromstring(fid.read(16), dtype=">c")
                 ch_name = ch_name[:np.argmax(ch_name == b'')].tostring()
-                # Use unicode or bytes depending on Py2/3
-                tag.data['ch_name'] = str(ch_name.decode())
-
+                d['ch_name'] = ch_name.decode()
             elif tag.type == FIFF.FIFFT_OLD_PACK:
                 offset = float(np.fromstring(fid.read(4), dtype=">f4"))
                 scale = float(np.fromstring(fid.read(4), dtype=">f4"))
@@ -458,7 +471,8 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
                 tag.data = list()
                 for _ in range(tag.size // 16 - 1):
                     s = fid.read(4 * 4)
-                    tag.data.append(Tag(*struct.unpack(">iIii", s)))
+                    tag.data.append(Tag(*np.fromstring(
+                        s, dtype='>i4,>u4,>i4,>i4')[0]))
             elif tag.type == FIFF.FIFFT_JULIAN:
                 tag.data = int(np.fromstring(fid.read(4), dtype=">i4"))
                 tag.data = jd2jcal(tag.data)
@@ -474,12 +488,25 @@ def read_tag(fid, pos=None, shape=None, rlims=None):
 
 def find_tag(fid, node, findkind):
     """Find Tag in an open FIF file descriptor
+
+    Parameters
+    ----------
+    fid : file-like
+        Open file.
+    node : dict
+        Node to search.
+    findkind : int
+        Tag kind to find.
+
+    Returns
+    -------
+    tag : instance of Tag
+        The first tag found.
     """
     for p in range(node['nent']):
         if node['directory'][p].kind == findkind:
             return read_tag(fid, node['directory'][p].pos)
-    tag = None
-    return tag
+    return None
 
 
 def has_tag(node, kind):
diff --git a/mne/io/tests/data/test_ica.lout b/mne/io/tests/data/test_ica.lout
index 75a63bc..079e4c0 100644
--- a/mne/io/tests/data/test_ica.lout
+++ b/mne/io/tests/data/test_ica.lout
@@ -1,3 +1,3 @@
-   -0.03     0.63    -0.03     0.33
-000     0.00     0.00     0.30     0.30 ICA 001
-001     0.30     0.00     0.30     0.30 ICA 002
+    0.00     1.00     0.00     1.00
+000     0.00     0.00     0.47     0.47 ICA 001
+001     0.53     0.00     0.47     0.47 ICA 002
diff --git a/mne/io/tests/test_apply_function.py b/mne/io/tests/test_apply_function.py
new file mode 100644
index 0000000..7adfede
--- /dev/null
+++ b/mne/io/tests/test_apply_function.py
@@ -0,0 +1,58 @@
+# Authors: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+from nose.tools import assert_equal, assert_raises
+
+from mne import create_info
+from mne.io import RawArray
+from mne.utils import logger, set_log_file, slow_test, _TempDir
+
+
+def bad_1(x):
+    return  # bad return type
+
+
+def bad_2(x):
+    return x[:-1]  # bad shape
+
+
+def printer(x):
+    logger.info('exec')
+    return x
+
+
+@slow_test
+def test_apply_function_verbose():
+    """Test apply function verbosity
+    """
+    n_chan = 2
+    n_times = 3
+    ch_names = [str(ii) for ii in range(n_chan)]
+    raw = RawArray(np.zeros((n_chan, n_times)),
+                   create_info(ch_names, 1., 'mag'))
+    # test return types in both code paths (parallel / 1 job)
+    assert_raises(TypeError, raw.apply_function, bad_1,
+                  None, None, 1)
+    assert_raises(ValueError, raw.apply_function, bad_2,
+                  None, None, 1)
+    assert_raises(TypeError, raw.apply_function, bad_1,
+                  None, None, 2)
+    assert_raises(ValueError, raw.apply_function, bad_2,
+                  None, None, 2)
+
+    # check our arguments
+    tempdir = _TempDir()
+    test_name = op.join(tempdir, 'test.log')
+    set_log_file(test_name)
+    try:
+        raw.apply_function(printer, None, None, 1, verbose=False)
+        with open(test_name) as fid:
+            assert_equal(len(fid.readlines()), 0)
+        raw.apply_function(printer, None, None, 1, verbose=True)
+        with open(test_name) as fid:
+            assert_equal(len(fid.readlines()), n_chan)
+    finally:
+        set_log_file(None)
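For reference, the interface these tests exercise: apply_function calls the
given function once per picked channel and expects an array of the same
shape and dtype back. A minimal sketch (positional arguments as in the
tests above: fun, picks, dtype, n_jobs):

    import numpy as np
    from mne import create_info
    from mne.io import RawArray

    raw = RawArray(np.random.randn(2, 1000),
                   create_info(['a', 'b'], 100., 'mag'))
    raw.apply_function(lambda x: x - x.mean(), None, None, 1)  # demean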
diff --git a/mne/io/tests/test_compensator.py b/mne/io/tests/test_compensator.py
index 3620c86..bc15630 100644
--- a/mne/io/tests/test_compensator.py
+++ b/mne/io/tests/test_compensator.py
@@ -15,12 +15,11 @@ from mne.utils import _TempDir, requires_mne, run_subprocess
 base_dir = op.join(op.dirname(__file__), 'data')
 ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
 
-tempdir = _TempDir()
-
 
 def test_compensation():
     """Test compensation
     """
+    tempdir = _TempDir()
     raw = Raw(ctf_comp_fname, compensation=None)
     comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
     assert_true(comp1.shape == (340, 340))
@@ -44,6 +43,8 @@ def test_compensation():
 def test_compensation_mne():
     """Test comensation by comparing with MNE
     """
+    tempdir = _TempDir()
+
     def make_evoked(fname, comp):
         raw = Raw(fname, compensation=comp)
         picks = pick_types(raw.info, meg=True, ref_meg=True)
diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py
index 4cdb190..4c81bfb 100644
--- a/mne/io/tests/test_meas_info.py
+++ b/mne/io/tests/test_meas_info.py
@@ -1,26 +1,92 @@
+# -*- coding: utf-8 -*-
+
 import os.path as op
 
-from nose.tools import assert_true, assert_equal, assert_raises
+from nose.tools import assert_false, assert_equal, assert_raises, assert_true
 import numpy as np
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_allclose
 
-from mne import io, Epochs, read_events
-from mne.io import read_fiducials, write_fiducials
+from mne import Epochs, read_events
+from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc,
+                    _loc_to_coil_trans, Raw, read_info, write_info)
 from mne.io.constants import FIFF
-from mne.io.meas_info import Info
-from mne.utils import _TempDir
+from mne.io.meas_info import (Info, create_info, _write_dig_points,
+                              _read_dig_points, _make_dig_points)
+from mne.utils import _TempDir, run_tests_if_main
+from mne.channels.montage import read_montage, read_dig_montage
 
 base_dir = op.join(op.dirname(__file__), 'data')
 fiducials_fname = op.join(base_dir, 'fsaverage-fiducials.fif')
 raw_fname = op.join(base_dir, 'test_raw.fif')
+chpi_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
 evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+kit_data_dir = op.join(op.dirname(__file__), '..', 'kit', 'tests', 'data')
+hsp_fname = op.join(kit_data_dir, 'test_hsp.txt')
+elp_fname = op.join(kit_data_dir, 'test_elp.txt')
+
+
+def test_coil_trans():
+    """Test loc<->coil_trans functions"""
+    rng = np.random.RandomState(0)
+    x = rng.randn(4, 4)
+    x[3] = [0, 0, 0, 1]
+    assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x)
+    x = rng.randn(12)
+    assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x)
 
-tempdir = _TempDir()
+
+def test_make_info():
+    """Test some create_info properties
+    """
+    n_ch = 1
+    info = create_info(n_ch, 1000., 'eeg')
+    coil_types = set([ch['coil_type'] for ch in info['chs']])
+    assert_true(FIFF.FIFFV_COIL_EEG in coil_types)
+
+    assert_raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)
+    assert_raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)
+    assert_raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types=['eeg', 'eeg'])
+    assert_raises(TypeError, create_info, ch_names=[np.array([1])],
+                  sfreq=1000)
+    assert_raises(TypeError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types=np.array([1]))
+    assert_raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,
+                  ch_types='awesome')
+    assert_raises(TypeError, create_info, ['Test Ch'], sfreq=1000,
+                  ch_types=None, montage=np.array([1]))
+    m = read_montage('biosemi32')
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=m)
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+
+    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+    d = read_dig_montage(hsp_fname, None, elp_fname, names, unit='m',
+                         transform=False)
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=d)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
+
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=[d, m])
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
+    info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg',
+                       montage=[d, 'biosemi32'])
+    ch_pos = [ch['loc'][:3] for ch in info['chs']]
+    assert_array_equal(ch_pos, m.pos)
+    idents = [p['ident'] for p in info['dig']]
+    assert_true(FIFF.FIFFV_POINT_NASION in idents)
 
 
 def test_fiducials_io():
     """Test fiducials i/o"""
+    tempdir = _TempDir()
     pts, coord_frame = read_fiducials(fiducials_fname)
     assert_equal(pts[0]['coord_frame'], FIFF.FIFFV_COORD_MRI)
     assert_equal(pts[0]['ident'], FIFF.FIFFV_POINT_CARDINAL)
@@ -42,7 +108,7 @@ def test_fiducials_io():
 
 def test_info():
     """Test info object"""
-    raw = io.Raw(raw_fname)
+    raw = Raw(raw_fname)
     event_id, tmin, tmax = 1, -0.2, 0.5
     events = read_events(event_name)
     event_id = int(events[0, 2])
@@ -71,14 +137,75 @@ def test_info():
 def test_read_write_info():
     """Test IO of info
     """
-    info = io.read_info(raw_fname)
+    tempdir = _TempDir()
+    info = read_info(raw_fname)
     temp_file = op.join(tempdir, 'info.fif')
     # check for bug `#1198`
     info['dev_head_t']['trans'] = np.eye(4)
-    t1 =  info['dev_head_t']['trans']
-    io.write_info(temp_file, info)
-    info2 = io.read_info(temp_file)
+    t1 = info['dev_head_t']['trans']
+    write_info(temp_file, info)
+    info2 = read_info(temp_file)
     t2 = info2['dev_head_t']['trans']
     assert_true(len(info['chs']) == len(info2['chs']))
     assert_array_equal(t1, t2)
-
+    # proc_history (e.g., GH#1875)
+    creator = u'é'
+    info = read_info(chpi_fname)
+    info['proc_history'][0]['creator'] = creator
+    info['hpi_meas'][0]['creator'] = creator
+    info['subject_info']['his_id'] = creator
+    write_info(temp_file, info)
+    info = read_info(temp_file)
+    assert_equal(info['proc_history'][0]['creator'], creator)
+    assert_equal(info['hpi_meas'][0]['creator'], creator)
+    assert_equal(info['subject_info']['his_id'], creator)
+
+
+def test_io_dig_points():
+    """Test Writing for dig files"""
+    tempdir = _TempDir()
+    points = _read_dig_points(hsp_fname)
+
+    dest = op.join(tempdir, 'test.txt')
+    dest_bad = op.join(tempdir, 'test.mne')
+    assert_raises(ValueError, _write_dig_points, dest, points[:, :2])
+    assert_raises(ValueError, _write_dig_points, dest_bad, points)
+    _write_dig_points(dest, points)
+    points1 = _read_dig_points(dest)
+    err = "Dig points diverged after writing and reading."
+    assert_array_equal(points, points1, err)
+
+    points2 = np.array([[-106.93, 99.80], [99.80, 68.81]])
+    np.savetxt(dest, points2, delimiter='\t', newline='\n')
+    assert_raises(ValueError, _read_dig_points, dest)
+
+
+def test_make_dig_points():
+    """Test application of Polhemus HSP to info"""
+    dig_points = _read_dig_points(hsp_fname)
+    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
+    assert_false(info['dig'])
+
+    info['dig'] = _make_dig_points(dig_points=dig_points)
+    assert_true(info['dig'])
+    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])
+
+    dig_points = _read_dig_points(elp_fname)
+    nasion, lpa, rpa = dig_points[:3]
+    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
+    assert_false(info['dig'])
+
+    info['dig'] = _make_dig_points(nasion, lpa, rpa, dig_points[3:], None)
+    assert_true(info['dig'])
+    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
+    assert_array_equal(info['dig'][idx]['r'],
+                       np.array([1.3930, 13.1613, -4.6967]))
+    assert_raises(ValueError, _make_dig_points, nasion[:2])
+    assert_raises(ValueError, _make_dig_points, None, lpa[:2])
+    assert_raises(ValueError, _make_dig_points, None, None, rpa[:2])
+    assert_raises(ValueError, _make_dig_points, None, None, None,
+                  dig_points[:, :2])
+    assert_raises(ValueError, _make_dig_points, None, None, None, None,
+                  dig_points[:, :2])
+
+run_tests_if_main()
diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py
index 2fda567..80e2767 100644
--- a/mne/io/tests/test_pick.py
+++ b/mne/io/tests/test_pick.py
@@ -1,5 +1,21 @@
+from nose.tools import assert_equal, assert_raises
 from numpy.testing import assert_array_equal
-from mne import pick_channels_regexp
+import numpy as np
+import os.path as op
+
+# __file__ is imported from mne, so op.dirname(__file__) is the package dir
+from mne import (pick_channels_regexp, pick_types, Epochs,
+                 read_forward_solution, rename_channels,
+                 pick_info, pick_channels, __file__)
+
+from mne.io.meas_info import create_info
+from mne.io.array import RawArray
+from mne.io.pick import (channel_indices_by_type, channel_type,
+                         pick_types_forward, _picks_by_type)
+from mne.io.constants import FIFF
+from mne.io import Raw
+from mne.datasets import testing
+from mne.forward.tests import test_forward
+from mne.utils import run_tests_if_main
 
 
 def test_pick_channels_regexp():
@@ -9,3 +25,162 @@ def test_pick_channels_regexp():
     assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0])
     assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2])
     assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2])
+
+
+def test_pick_seeg():
+    """Test picking with SEEG
+    """
+    names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
+    types = 'mag mag eeg eeg seeg seeg seeg'.split()
+    info = create_info(names, 1024., types)
+    idx = channel_indices_by_type(info)
+    assert_array_equal(idx['mag'], [0, 1])
+    assert_array_equal(idx['eeg'], [2, 3])
+    assert_array_equal(idx['seeg'], [4, 5, 6])
+    assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
+    for i, t in enumerate(types):
+        assert_equal(channel_type(info, i), types[i])
+    raw = RawArray(np.zeros((len(names), 10)), info)
+    events = np.array([[1, 0, 0], [2, 0, 0]])
+    epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
+    evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
+    e_seeg = evoked.pick_types(meg=False, seeg=True, copy=True)
+    for l, r in zip(e_seeg.ch_names, names[4:]):
+        assert_equal(l, r)
+
+
+def _check_fwd_n_chan_consistent(fwd, n_expected):
+    n_ok = len(fwd['info']['ch_names'])
+    n_sol = fwd['sol']['data'].shape[0]
+    assert_equal(n_expected, n_sol)
+    assert_equal(n_expected, n_ok)
+
+
+@testing.requires_testing_data
+def test_pick_forward_seeg():
+    """Test picking forward with SEEG
+    """
+    fwd = read_forward_solution(test_forward.fname_meeg)
+    counts = channel_indices_by_type(fwd['info'])
+    for key in counts.keys():
+        counts[key] = len(counts[key])
+    counts['meg'] = counts['mag'] + counts['grad']
+    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
+    # should raise an exception because no sEEG channels are present
+    assert_raises(ValueError, pick_types_forward, fwd, meg=False, eeg=False,
+                  seeg=True)
+    # change last chan from EEG to sEEG
+    seeg_name = 'OTp1'
+    rename_channels(fwd['info'], {'EEG 060': seeg_name})
+    for ch in fwd['info']['chs']:
+        if ch['ch_name'] == seeg_name:
+            ch['kind'] = FIFF.FIFFV_SEEG_CH
+            ch['coil_type'] = FIFF.FIFFV_COIL_EEG
+    fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name']
+    counts['eeg'] -= 1
+    counts['seeg'] += 1
+    # repick & check
+    fwd_seeg = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
+    assert_equal(fwd_seeg['sol']['row_names'], [seeg_name])
+    assert_equal(fwd_seeg['info']['ch_names'], [seeg_name])
+    # should work fine
+    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
+    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
+    fwd_ = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
+    _check_fwd_n_chan_consistent(fwd_, counts['seeg'])
+
+
+def test_picks_by_channels():
+    """Test creating pick_lists"""
+
+    rng = np.random.RandomState(909)
+
+    test_data = rng.random_sample((4, 2000))
+    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
+    ch_types = ['grad', 'mag', 'mag', 'eeg']
+    sfreq = 250.0
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+    raw = RawArray(test_data, info)
+
+    pick_list = _picks_by_type(raw.info)
+    assert_equal(len(pick_list), 3)
+    assert_equal(pick_list[0][0], 'mag')
+    pick_list2 = _picks_by_type(raw.info, meg_combined=False)
+    assert_equal(len(pick_list), len(pick_list2))
+    assert_equal(pick_list2[0][0], 'mag')
+
+    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
+    assert_equal(len(pick_list), len(pick_list2) + 1)
+    assert_equal(pick_list2[0][0], 'meg')
+
+    test_data = rng.random_sample((4, 2000))
+    ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
+    ch_types = ['mag', 'mag', 'mag', 'mag']
+    sfreq = 250.0
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+    raw = RawArray(test_data, info)
+
+    # Make sure checks for list input work.
+    assert_raises(ValueError, pick_channels, ch_names, 'MEG 001')
+    assert_raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi')
+
+    pick_list = _picks_by_type(raw.info)
+    assert_equal(len(pick_list), 1)
+    assert_equal(pick_list[0][0], 'mag')
+    pick_list2 = _picks_by_type(raw.info, meg_combined=True)
+    assert_equal(len(pick_list), len(pick_list2))
+    assert_equal(pick_list2[0][0], 'mag')
+
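_picks_by_type returns a list of (channel_type, picks) pairs, which is what
the assertions above inspect; schematically, for the grad/mag/mag/eeg info
created above (ordering beyond the leading entry is illustrative):

    # _picks_by_type(raw.info) ->
    #     [('mag', [1, 2]), ('grad', [0]), ('eeg', [3])]
    # _picks_by_type(raw.info, meg_combined=True) ->
    #     [('meg', [0, 1, 2]), ('eeg', [3])]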
+
+def test_clean_info_bads():
+    """Test cleaning info['bads'] when bad_channels are excluded """
+
+    raw_file = op.join(op.dirname(__file__), 'io', 'tests', 'data',
+                       'test_raw.fif')
+    raw = Raw(raw_file)
+
+    # select eeg channels
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+
+    # select 3 eeg channels as bads
+    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
+    eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]
+
+    # select meg channels
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+
+    # select 3 meg channels as bads
+    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
+    meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]
+
+    # simulate the bad channels
+    raw.info['bads'] = eeg_bad_ch + meg_bad_ch
+
+    # simulate the call to pick_info excluding the bad eeg channels
+    info_eeg = pick_info(raw.info, picks_eeg)
+
+    # simulate the call to pick_info excluding the bad meg channels
+    info_meg = pick_info(raw.info, picks_meg)
+
+    assert_equal(info_eeg['bads'], eeg_bad_ch)
+    assert_equal(info_meg['bads'], meg_bad_ch)
+
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['bads'] += ['EEG 053']
+    assert_raises(RuntimeError, info._check_consistency)
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['ch_names'][0] += 'f'
+    assert_raises(RuntimeError, info._check_consistency)
+    info = pick_info(raw.info, picks_meg)
+    info._check_consistency()
+    info['nchan'] += 1
+    assert_raises(RuntimeError, info._check_consistency)
+
+run_tests_if_main()
diff --git a/mne/io/tests/test_proc_history.py b/mne/io/tests/test_proc_history.py
new file mode 100644
index 0000000..555b08d
--- /dev/null
+++ b/mne/io/tests/test_proc_history.py
@@ -0,0 +1,47 @@
+# Authors: Denis A. Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+# License: Simplified BSD
+
+import numpy as np
+import os.path as op
+from mne import io
+from mne.io.constants import FIFF
+from mne.io.proc_history import _get_sss_rank
+from nose.tools import assert_true, assert_equal
+
+base_dir = op.join(op.dirname(__file__), 'data')
+raw_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+
+
+def test_maxfilter_io():
+    """test maxfilter io"""
+    raw = io.Raw(raw_fname)
+    mf = raw.info['proc_history'][1]['max_info']
+
+    assert_equal(mf['sss_info']['frame'], FIFF.FIFFV_COORD_HEAD)
+    # based on manual 2.0, rev. 5.0 page 23
+    assert_true(5 <= mf['sss_info']['in_order'] <= 11)
+    assert_true(mf['sss_info']['out_order'] <= 5)
+    assert_true(mf['sss_info']['nchan'] > len(mf['sss_info']['components']))
+
+    assert_equal(raw.ch_names[:mf['sss_info']['nchan']],
+                 mf['sss_ctc']['proj_items_chs'])
+    assert_equal(mf['sss_ctc']['decoupler'].shape,
+                 (mf['sss_info']['nchan'], mf['sss_info']['nchan']))
+    assert_equal(np.unique(np.diag(mf['sss_ctc']['decoupler'].toarray())),
+                 np.array([1.], dtype=np.float32))
+
+    assert_equal(mf['sss_cal']['cal_corrs'].shape, (306, 14))
+    assert_equal(mf['sss_cal']['cal_chans'].shape, (306, 2))
+    vv_coils = [v for k, v in FIFF.items() if 'FIFFV_COIL_VV' in k]
+    assert_true(all(k in vv_coils
+                    for k in set(mf['sss_cal']['cal_chans'][:, 1])))
+
+
+def test_maxfilter_get_rank():
+    """test maxfilter rank lookup"""
+    raw = io.Raw(raw_fname)
+    mf = raw.info['proc_history'][0]['max_info']
+    rank1 = mf['sss_info']['nfree']
+    rank2 = _get_sss_rank(mf)
+    assert_equal(rank1, rank2)
diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py
new file mode 100644
index 0000000..9d79349
--- /dev/null
+++ b/mne/io/tests/test_raw.py
@@ -0,0 +1,51 @@
+# Generic tests that all raw classes should run
+from os import path as op
+from numpy.testing import assert_allclose
+
+from mne.datasets import testing
+from mne.io import Raw
+
+
+def _test_concat(reader, *args):
+    """Test concatenation of raw classes that allow not preloading"""
+    data = None
+    for preload in (True, False):
+        raw1 = reader(*args, preload=preload)
+        raw2 = reader(*args, preload=preload)
+        raw1.append(raw2)
+        raw1.load_data()
+        if data is None:
+            data = raw1[:, :][0]
+        assert_allclose(data, raw1[:, :][0])
+    for first_preload in (True, False):
+        raw = reader(*args, preload=first_preload)
+        data = raw[:, :][0]
+        for preloads in ((True, True), (True, False), (False, False)):
+            for last_preload in (True, False):
+                print(first_preload, preloads, last_preload)
+                raw1 = raw.crop(0, 0.4999)
+                if preloads[0]:
+                    raw1.load_data()
+                raw2 = raw.crop(0.5, None)
+                if preloads[1]:
+                    raw2.load_data()
+                raw1.append(raw2)
+                if last_preload:
+                    raw1.load_data()
+                assert_allclose(data, raw1[:, :][0])
+
+
+@testing.requires_testing_data
+def test_time_index():
+    """Test indexing of raw times"""
+    raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
+                        'data', 'test_raw.fif')
+    raw = Raw(raw_fname)
+
+    # Test original (non-rounding) indexing behavior
+    orig_inds = raw.time_as_index(raw.times)
+    assert(len(set(orig_inds)) != len(orig_inds))
+
+    # Test new (rounding) indexing behavior
+    new_inds = raw.time_as_index(raw.times, use_rounding=True)
+    assert(len(set(new_inds)) == len(new_inds))
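The difference under test, as a worked sketch: with sfreq = 1000., a time
of 1.9 ms truncates to sample 1 under the old behavior but rounds to
sample 2 with use_rounding=True, so nearby times no longer collapse onto
the same index:

    sfreq = 1000.
    int(0.0019 * sfreq)         # -> 1  (truncation, the old behavior)
    int(round(0.0019 * sfreq))  # -> 2  (use_rounding=True)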
diff --git a/mne/io/tests/test_reference.py b/mne/io/tests/test_reference.py
new file mode 100644
index 0000000..7ce82d5
--- /dev/null
+++ b/mne/io/tests/test_reference.py
@@ -0,0 +1,307 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
+import os.path as op
+import numpy as np
+
+from nose.tools import assert_true, assert_equal, assert_raises
+from numpy.testing import assert_array_equal, assert_allclose
+
+from mne import pick_types, Evoked, Epochs, read_events
+from mne.io.constants import FIFF
+from mne.io import (set_eeg_reference, set_bipolar_reference,
+                    add_reference_channels)
+from mne.io.proj import _has_eeg_average_ref_proj
+from mne.io.reference import _apply_reference
+from mne.datasets import testing
+from mne.io import Raw
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
+ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
+
+
+def _test_reference(raw, reref, ref_data, ref_from):
+    """Helper function to test whether a reference has been correctly
+    applied."""
+    # Separate EEG channels from other channel types
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
+                             stim=True, exclude='bads')
+
+    # Calculate indices of reference channels
+    picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
+
+    # Get data
+    if isinstance(raw, Evoked):
+        _data = raw.data
+        _reref = reref.data
+    else:
+        _data = raw._data
+        _reref = reref._data
+
+    # Check that the ref has been properly computed
+    assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
+
+    # Get the raw EEG data and other channel data
+    raw_eeg_data = _data[..., picks_eeg, :]
+    raw_other_data = _data[..., picks_other, :]
+
+    # Get the rereferenced EEG data
+    reref_eeg_data = _reref[..., picks_eeg, :]
+    reref_other_data = _reref[..., picks_other, :]
+
+    # Undo rereferencing of EEG channels
+    if isinstance(raw, Epochs):
+        unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
+    else:
+        unref_eeg_data = reref_eeg_data + ref_data
+
+    # Check that both the EEG data and the other data are the same
+    assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
+    assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
+
+
+@testing.requires_testing_data
+def test_apply_reference():
+    """Test base function for rereferencing"""
+    raw = Raw(fif_fname, preload=True)
+
+    # Rereference raw data by creating a copy of original data
+    reref, ref_data = _apply_reference(raw, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # The CAR reference projection should have been removed by the function
+    assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
+
+    # Test that disabling the reference does not break anything
+    reref, ref_data = _apply_reference(raw, [])
+    assert_array_equal(raw._data, reref._data)
+
+    # Test that data is modified in place when copy=False
+    reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'],
+                                       copy=False)
+    assert_true(raw is reref)
+
+    # Test re-referencing Epochs object
+    raw = Raw(fif_fname, preload=False, add_eeg_ref=False)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    reref, ref_data = _apply_reference(epochs, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test re-referencing Evoked object
+    evoked = epochs.average()
+    reref, ref_data = _apply_reference(evoked, ref_from=['EEG 001', 'EEG 002'],
+                                       copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test invalid input
+    raw_np = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
+
+
+@testing.requires_testing_data
+def test_set_eeg_reference():
+    """Test rereference eeg data"""
+    raw = Raw(fif_fname, preload=True)
+    raw.info['projs'] = []
+
+    # Test setting an average reference
+    assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
+    reref, ref_data = set_eeg_reference(raw)
+    assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
+    assert_true(ref_data is None)
+
+    # Test setting an average reference when one was already present
+    reref, ref_data = set_eeg_reference(raw, copy=False)
+    assert_true(ref_data is None)
+
+    # Rereference raw data by creating a copy of original data
+    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
+    assert_true(reref.info['custom_ref_applied'])
+    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
+
+    # Test that data is modified in place when copy=False
+    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
+                                        copy=False)
+    assert_true(raw is reref)
+
+
+@testing.requires_testing_data
+def test_set_bipolar_reference():
+    """Test bipolar referencing"""
+    raw = Raw(fif_fname, preload=True)
+    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
+                                  {'kind': FIFF.FIFFV_EOG_CH,
+                                   'extra': 'some extra value'})
+    assert_true(reref.info['custom_ref_applied'])
+
+    # Compare result to a manual calculation
+    a = raw.pick_channels(['EEG 001', 'EEG 002'], copy=True)
+    a = a._data[0, :] - a._data[1, :]
+    b = reref.pick_channels(['bipolar'], copy=True)._data[0, :]
+    assert_allclose(a, b)
+
+    # Original channels should be replaced by a virtual one
+    assert_true('EEG 001' not in reref.ch_names)
+    assert_true('EEG 002' not in reref.ch_names)
+    assert_true('bipolar' in reref.ch_names)
+
+    # Check channel information
+    bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
+    an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
+    for key in bp_info:
+        if key == 'loc':
+            assert_array_equal(bp_info[key], 0)
+        elif key == 'coil_type':
+            assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
+        elif key == 'kind':
+            assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
+        else:
+            assert_equal(bp_info[key], an_info[key])
+    assert_equal(bp_info['extra'], 'some extra value')
+
+    # Minimalist call
+    reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
+    assert_true('EEG 001-EEG 002' in reref.ch_names)
+
+    # Test creating a bipolar reference that doesn't involve EEG channels:
+    # it should not set the custom_ref_applied flag
+    reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
+                                  ch_info={'kind': FIFF.FIFFV_MEG_CH})
+    assert_true(not reref.info['custom_ref_applied'])
+    assert_true('MEG 0111-MEG 0112' in reref.ch_names)
+
+    # Test a battery of invalid inputs
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', 'bipolar',
+                  ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
+    assert_raises(ValueError, set_bipolar_reference, raw,
+                  'EEG 001', 'EEG 002', ch_name='EEG 003')
+
+
+@testing.requires_testing_data
+def test_add_reference():
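+    """Test adding reference channels"""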
+    raw = Raw(fif_fname, preload=True)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    # check if channel already exists
+    assert_raises(ValueError, add_reference_channels,
+                  raw, raw.info['ch_names'][0])
+    # add reference channel to Raw
+    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
+    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
+    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
+
+    orig_nchan = raw.info['nchan']
+    raw = add_reference_channels(raw, 'Ref', copy=False)
+    assert_array_equal(raw._data, raw_ref._data)
+    assert_equal(raw.info['nchan'], orig_nchan + 1)
+
+    ref_idx = raw.ch_names.index('Ref')
+    ref_data, _ = raw[ref_idx]
+    assert_array_equal(ref_data, 0)
+
+    # add two reference channels to Raw
+    raw = Raw(fif_fname, preload=True)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    assert_raises(ValueError, add_reference_channels, raw,
+                  raw.info['ch_names'][0])
+    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
+    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
+    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
+
+    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
+    ref_idx = raw.ch_names.index('M1')
+    ref_idy = raw.ch_names.index('M2')
+    ref_data, _ = raw[[ref_idx, ref_idy]]
+    assert_array_equal(ref_data, 0)
+
+    # add reference channel to epochs
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
+    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
+    ref_idx = epochs_ref.ch_names.index('Ref')
+    ref_data = epochs_ref.get_data()[:, ref_idx, :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
+    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
+                       epochs_ref.get_data()[:, picks_eeg, :])
+
+    # add two reference channels to epochs
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
+    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
+    ref_idx = epochs_ref.ch_names.index('M1')
+    ref_idy = epochs_ref.ch_names.index('M2')
+    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
+    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
+                       epochs_ref.get_data()[:, picks_eeg, :])
+
+    # add reference channel to evoked
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    evoked = epochs.average()
+    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
+    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
+    ref_idx = evoked_ref.ch_names.index('Ref')
+    ref_data = evoked_ref.data[ref_idx, :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
+    assert_array_equal(evoked.data[picks_eeg, :],
+                       evoked_ref.data[picks_eeg, :])
+
+    # add two reference channels to evoked
+    raw = Raw(fif_fname, preload=True)
+    events = read_events(eve_fname)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
+                    picks=picks_eeg, preload=True)
+    evoked = epochs.average()
+    evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
+    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
+    ref_idx = evoked_ref.ch_names.index('M1')
+    ref_idy = evoked_ref.ch_names.index('M2')
+    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
+    assert_array_equal(ref_data, 0)
+    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
+    assert_array_equal(evoked.data[picks_eeg, :],
+                       evoked_ref.data[picks_eeg, :])
+
+    # Test invalid inputs
+    raw_np = Raw(fif_fname, preload=False)
+    assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
+    assert_raises(ValueError, add_reference_channels, raw, 1)
diff --git a/mne/io/tree.py b/mne/io/tree.py
index 981dc20..dccfd4e 100644
--- a/mne/io/tree.py
+++ b/mne/io/tree.py
@@ -3,7 +3,6 @@
 #
 # License: BSD (3-clause)
 
-import struct
 import numpy as np
 
 from .constants import FIFF
@@ -14,11 +13,19 @@ from ..utils import logger, verbose
 
 
 def dir_tree_find(tree, kind):
-    """[nodes] = dir_tree_find(tree,kind)
-
-       Find nodes of the given kind from a directory tree structure
-
-       Returns a list of matching nodes
+    """Find nodes of the given kind from a directory tree structure
+
+    Parameters
+    ----------
+    tree : dict
+        Directory tree.
+    kind : int
+        Kind to find.
+
+    Returns
+    -------
+    nodes : list
+        List of matching nodes.
     """
     nodes = []
 
@@ -101,11 +108,12 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
         tree['directory'] = None
 
     logger.debug('    ' * (indent + 1) + 'block = %d nent = %d nchild = %d'
-                % (tree['block'], tree['nent'], tree['nchild']))
+                 % (tree['block'], tree['nent'], tree['nchild']))
     logger.debug('    ' * indent + 'end } %d' % block)
     last = this
     return tree, last
 
+
 ###############################################################################
 # Writing
 
@@ -139,7 +147,7 @@ def copy_tree(fidin, in_id, nodes, fidout):
                 fidin.seek(d.pos, 0)
 
                 s = fidin.read(4 * 4)
-                tag = Tag(*struct.unpack(">iIii", s))
+                tag = Tag(*np.fromstring(s, dtype='>i4,>u4,>i4,>i4')[0])
                 tag.data = np.fromstring(fidin.read(tag.size), dtype='>B')
 
                 _write(fidout, tag.data, tag.kind, 1, tag.type, '>B')
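As a usage sketch for the newly documented dir_tree_find (the file name is
a placeholder; fiff_open returns the parsed directory tree alongside the
open file handle):

    from mne.io.open import fiff_open
    from mne.io.constants import FIFF
    from mne.io.tree import dir_tree_find

    fid, tree, _ = fiff_open('raw.fif')  # placeholder path
    meas_nodes = dir_tree_find(tree, FIFF.FIFFB_MEAS)
    fid.close()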
diff --git a/mne/io/write.py b/mne/io/write.py
index 11f0c4f..da090fb 100644
--- a/mne/io/write.py
+++ b/mne/io/write.py
@@ -31,6 +31,20 @@ def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype):
     fid.write(np.array(data, dtype=dtype).tostring())
 
 
+def _get_split_size(split_size):
+    """Convert human-readable bytes to machine-readable bytes."""
+    if isinstance(split_size, string_types):
+        exp = dict(MB=20, GB=30).get(split_size[-2:], None)
+        if exp is None:
+            raise ValueError('split_size has to end with either'
+                             '"MB" or "GB"')
+        split_size = int(float(split_size[:-2]) * 2 ** exp)
+
+    if split_size > 2147483648:
+        raise ValueError('split_size cannot be larger than 2GB')
+    return split_size
+
+
 def write_int(fid, kind, data):
     """Writes a 32-bit integer tag to a fif file"""
     data_size = 4
@@ -118,6 +132,7 @@ def write_float_matrix(fid, kind, mat):
     dims[:mat.ndim] = mat.shape[::-1]
     dims[-1] = mat.ndim
     fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
 
 
 def write_double_matrix(fid, kind, mat):
@@ -137,6 +152,7 @@ def write_double_matrix(fid, kind, mat):
     dims[:mat.ndim] = mat.shape[::-1]
     dims[-1] = mat.ndim
     fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
 
 
 def write_int_matrix(fid, kind, mat):
@@ -157,6 +173,7 @@ def write_int_matrix(fid, kind, mat):
     dims[1] = mat.shape[0]
     dims[2] = 2
     fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
 
 
 def get_machid():
@@ -167,8 +184,8 @@ def get_machid():
     ids : array (length 2, int32)
         The machine identifier used in MNE.
     """
-    mac = b('%012x' %uuid.getnode()) # byte conversion for Py3
-    mac = re.findall(b'..', mac) # split string
+    mac = b('%012x' % uuid.getnode())  # byte conversion for Py3
+    mac = re.findall(b'..', mac)  # split string
     mac += [b'00', b'00']  # add two more fields
 
     # Convert to integer in reverse-order (for some reason)
@@ -243,6 +260,15 @@ def start_file(fname, id_=None):
     return fid
 
 
+def check_fiff_length(fid, close=True):
+    """Ensure our file hasn't grown too large to work properly"""
+    if fid.tell() > 2147483648:  # 2 ** 31, FIFF uses signed 32-bit locations
+        if close:
+            fid.close()
+        raise IOError('FIFF file exceeded 2GB limit, please split file or '
+                      'save to a different format')
+
+
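The 2147483648 constant is not arbitrary: FIFF stores file positions as
big-endian signed 32-bit integers, so offsets at or beyond 2 ** 31 cannot
be represented. A one-line sanity check of that bound:

    import numpy as np
    assert np.iinfo(np.dtype('>i4')).max == 2 ** 31 - 1  # 2147483647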
 def end_file(fid):
     """Writes the closing tags to a fif file and closes the file"""
     data_size = 0
@@ -250,21 +276,12 @@ def end_file(fid):
     fid.write(np.array(FIFF.FIFFT_VOID, dtype='>i4').tostring())
     fid.write(np.array(data_size, dtype='>i4').tostring())
     fid.write(np.array(FIFF.FIFFV_NEXT_NONE, dtype='>i4').tostring())
+    check_fiff_length(fid)
     fid.close()
 
 
 def write_coord_trans(fid, trans):
     """Writes a coordinate transformation structure"""
-
-    #?typedef struct _fiffCoordTransRec {
-    #  fiff_int_t   from;                          /*!< Source coordinate system. */
-    #  fiff_int_t   to;                        /*!< Destination coordinate system. */
-    #  fiff_float_t rot[3][3];             /*!< The forward transform (rotation part) */
-    #  fiff_float_t move[3];                   /*!< The forward transform (translation part) */
-    #  fiff_float_t invrot[3][3];              /*!< The inverse transform (rotation part) */
-    #  fiff_float_t invmove[3];            /*!< The inverse transform (translation part) */
-    #} *fiffCoordTrans, fiffCoordTransRec; /*!< Coordinate transformation descriptor */
-
     data_size = 4 * 2 * 12 + 4 * 2
     fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype='>i4').tostring())
     fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype='>i4').tostring())
@@ -289,27 +306,6 @@ def write_coord_trans(fid, trans):
 
 def write_ch_info(fid, ch):
     """Writes a channel information record to a fif file"""
-
-    #typedef struct _fiffChPosRec {
-    #  fiff_int_t   coil_type;      /*!< What kind of coil. */
-    #  fiff_float_t r0[3];          /*!< Coil coordinate system origin */
-    #  fiff_float_t ex[3];          /*!< Coil coordinate system x-axis unit vector */
-    #  fiff_float_t ey[3];          /*!< Coil coordinate system y-axis unit vector */
-    #  fiff_float_t ez[3];                   /*!< Coil coordinate system z-axis unit vector */
-    #} fiffChPosRec,*fiffChPos;                /*!< Measurement channel position and coil type */
-
-    #typedef struct _fiffChInfoRec {
-    #  fiff_int_t    scanNo;    /*!< Scanning order # */
-    #  fiff_int_t    logNo;     /*!< Logical channel # */
-    #  fiff_int_t    kind;      /*!< Kind of channel */
-    #  fiff_float_t  range;     /*!< Voltmeter range (only applies to raw data ) */
-    #  fiff_float_t  cal;       /*!< Calibration from volts to... */
-    #  fiff_ch_pos_t chpos;     /*!< Channel location */
-    #  fiff_int_t    unit;      /*!< Unit of measurement */
-    #  fiff_int_t    unit_mul;  /*!< Unit multiplier exponent */
-    #  fiff_char_t   ch_name[16];   /*!< Descriptive name for the channel */
-    #} fiffChInfoRec,*fiffChInfo;   /*!< Description of one channel */
-
     data_size = 4 * 13 + 4 * 7 + 16
 
     fid.write(np.array(FIFF.FIFF_CH_INFO, dtype='>i4').tostring())
@@ -343,14 +339,6 @@ def write_ch_info(fid, ch):
 
 def write_dig_point(fid, dig):
     """Writes a digitizer data point into a fif file"""
-    #?typedef struct _fiffDigPointRec {
-    #  fiff_int_t kind;               /*!< FIFF_POINT_CARDINAL,
-    #                                  *   FIFF_POINT_HPI, or
-    #                                  *   FIFF_POINT_EEG */
-    #  fiff_int_t ident;              /*!< Number identifying this point */
-    #  fiff_float_t r[3];             /*!< Point location */
-    #} *fiffDigPoint,fiffDigPointRec; /*!< Digitization point description */
-
     data_size = 5 * 4
 
     fid.write(np.array(FIFF.FIFF_DIG_POINT, dtype='>i4').tostring())
@@ -384,6 +372,7 @@ def write_float_sparse_rcs(fid, kind, mat):
 
     dims = [nnzm, mat.shape[0], mat.shape[1], 2]
     fid.write(np.array(dims, dtype='>i4').tostring())
+    check_fiff_length(fid)
 
 
 def _generate_meas_id():
@@ -391,6 +380,16 @@ def _generate_meas_id():
     id_ = dict()
     id_['version'] = (1 << 16) | 2
     id_['machid'] = get_machid()
-    id_['secs'] = time.time()
-    id_['usecs'] = 0            # Do not know how we could get this XXX
+    id_['secs'], id_['usecs'] = _date_now()
     return id_
+
+
+def _date_now():
+    """Helper to get date in secs, usecs"""
+    now = time.time()
+    # Get date in secs/usecs (as in `fill_measurement_info` in
+    # mne/forward/forward.py)
+    date_arr = np.array([np.floor(now), 1e6 * (now - np.floor(now))],
+                        dtype='int32')
+
+    return date_arr
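# A quick sketch of what the new _date_now helper returns: time.time() split
# into whole seconds and the remainder scaled to microseconds, both stored as
# int32, matching the integer secs/usecs fields of a FIF measurement ID.
import time
import numpy as np

now = time.time()
secs = np.floor(now)
date_arr = np.array([secs, 1e6 * (now - secs)], dtype='int32')
assert 0 <= date_arr[1] < 1000000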
diff --git a/mne/label.py b/mne/label.py
index ae17f22..8452a31 100644
--- a/mne/label.py
+++ b/mne/label.py
@@ -10,18 +10,16 @@ from os import path as op
 import os
 import copy as cp
 import re
-from warnings import warn
 
 import numpy as np
 from scipy import linalg, sparse
 
 from .fixes import digitize, in1d
-from .utils import (get_subjects_dir, _check_subject, logger, verbose,
-                    deprecated)
-from .source_estimate import (_read_stc, mesh_edges, mesh_dist, morph_data,
-                              SourceEstimate, spatial_src_connectivity)
+from .utils import get_subjects_dir, _check_subject, logger, verbose
+from .source_estimate import (morph_data, SourceEstimate,
+                              spatial_src_connectivity)
 from .source_space import add_source_space_distances
-from .surface import read_surface, fast_cross_3d
+from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
 from .source_space import SourceSpaces
 from .parallel import parallel_func, check_n_jobs
 from .stats.cluster_level import _find_clusters
@@ -123,8 +121,8 @@ def _n_colors(n, bytes_=False, cmap='hsv'):
     """
     n_max = 2 ** 10
     if n > n_max:
-        err = "Can't produce more than %i unique colors" % n_max
-        raise NotImplementedError(err)
+        raise NotImplementedError("Can't produce more than %i unique "
+                                  "colors" % n_max)
 
     from matplotlib.cm import get_cmap
     cm = get_cmap(cmap, n_max)
@@ -144,10 +142,12 @@ class Label(object):
     """A FreeSurfer/MNE label with vertices restricted to one hemisphere
 
     Labels can be combined with the ``+`` operator:
-     - Duplicate vertices are removed.
-     - If duplicate vertices have conflicting position values, an error is
-       raised.
-     - Values of duplicate vertices are summed.
+
+        * Duplicate vertices are removed.
+        * If duplicate vertices have conflicting position values, an error
+          is raised.
+        * Values of duplicate vertices are summed.
+
 
     Parameters
     ----------
@@ -159,7 +159,11 @@ class Label(object):
         values at the vertices. If None, then ones are used.
     hemi : 'lh' | 'rh'
         Hemisphere to which the label applies.
-    comment, name, fpath : str
+    comment : str
+        Kept as information but not used by the object itself.
+    name : str
+        Kept as information but not used by the object itself.
+    filename : str
         Kept as information but not used by the object itself.
     subject : str | None
         Name of the subject the label is from.
@@ -217,9 +221,8 @@ class Label(object):
             pos = np.asarray(pos)
 
         if not (len(vertices) == len(values) == len(pos)):
-            err = ("vertices, values and pos need to have same length (number "
-                   "of vertices)")
-            raise ValueError(err)
+            raise ValueError("vertices, values and pos need to have same "
+                             "length (number of vertices)")
 
         # name
         if name is None and filename is not None:
@@ -339,8 +342,35 @@ class Label(object):
                       self.subject, color, verbose)
         return label
 
+    def __sub__(self, other):
+        if isinstance(other, BiHemiLabel):
+            if self.hemi == 'lh':
+                return self - other.lh
+            else:
+                return self - other.rh
+        elif isinstance(other, Label):
+            if self.subject != other.subject:
+                raise ValueError('Label subject parameters must match, got '
+                                 '"%s" and "%s". Consider setting the '
+                                 'subject parameter on initialization, or '
+                                 'setting label.subject manually before '
+                                 'combining labels.' % (self.subject,
+                                                        other.subject))
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        if self.hemi == other.hemi:
+            keep = in1d(self.vertices, other.vertices, True, invert=True)
+        else:
+            keep = np.arange(len(self.vertices))
+
+        name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
+        return Label(self.vertices[keep], self.pos[keep], self.values[keep],
+                     self.hemi, self.comment, name, None, self.subject,
+                     self.color, self.verbose)
+
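# A small illustration of the vertex logic in the new __sub__ above: for
# same-hemisphere labels, the vertices of `other` are dropped via an inverted
# membership test (np.in1d stands in here for the in1d shim from mne.fixes).
import numpy as np

verts_self = np.array([2, 5, 7, 9])
verts_other = np.array([5, 9, 11])
keep = np.in1d(verts_self, verts_other, assume_unique=True, invert=True)
print(verts_self[keep])  # [2 7]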
     def save(self, filename):
-        """Write to disk as FreeSurfer *.label file
+        """Write to disk as FreeSurfer \*.label file
 
         Parameters
         ----------
@@ -397,11 +427,10 @@ class Label(object):
 
         nearest = hemi_src['nearest']
         if nearest is None:
-            msg = ("Computing patch info for source space, this can take "
-                   "a while. In order to avoid this in the future, run "
-                   "mne.add_source_space_distances() on the source space "
-                   "and save it.")
-            logger.warn(msg)
+            logger.warn("Computing patch info for source space, this can take "
+                        "a while. In order to avoid this in the future, run "
+                        "mne.add_source_space_distances() on the source space "
+                        "and save it.")
             add_source_space_distances(src)
             nearest = hemi_src['nearest']
 
@@ -499,7 +528,7 @@ class Label(object):
             values will be morphed to the set of vertices specified in grade[0]
             and grade[1], assuming that these are vertices for the left and
             right hemispheres. Note that specifying the vertices (e.g.,
-            grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
+            ``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
             standard grade 5 source space) can be substantially faster than
             computing vertex locations. If one array is used, it is assumed
             that all vertices belong to the hemisphere of the label. To create
@@ -521,33 +550,33 @@ class Label(object):
         Notes
         -----
         This function will set label.pos to be all zeros. If the positions
-        on the new surface are required, consider using mne.read_surface
-        with label.vertices.
+        on the new surface are required, consider using `mne.read_surface`
+        with `label.vertices`.
         """
         subject_from = _check_subject(self.subject, subject_from)
         if not isinstance(subject_to, string_types):
             raise TypeError('"subject_to" must be entered as a string')
         if not isinstance(smooth, int):
-            raise ValueError('smooth must be an integer')
+            raise TypeError('smooth must be an integer')
         if np.all(self.values == 0):
             raise ValueError('Morphing label with all zero values will result '
                              'in the label having no vertices. Consider using '
                              'something like label.values.fill(1.0).')
         if isinstance(grade, np.ndarray):
             if self.hemi == 'lh':
-                grade = [grade, np.array([])]
+                grade = [grade, np.array([], int)]
             else:
-                grade = [np.array([]), grade]
+                grade = [np.array([], int), grade]
         if self.hemi == 'lh':
-            vertices = [self.vertices, np.array([])]
+            vertices = [self.vertices, np.array([], int)]
         else:
-            vertices = [np.array([]), self.vertices]
+            vertices = [np.array([], int), self.vertices]
         data = self.values[:, np.newaxis]
         stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
                              subject=subject_from)
         stc = morph_data(subject_from, subject_to, stc, grade=grade,
                          smooth=smooth, subjects_dir=subjects_dir,
-                         n_jobs=n_jobs)
+                         warn=False, n_jobs=n_jobs)
         inds = np.nonzero(stc.data)[0]
         if copy is True:
             label = self.copy()
@@ -556,9 +585,9 @@ class Label(object):
         label.values = stc.data[inds, :].ravel()
         label.pos = np.zeros((len(inds), 3))
         if label.hemi == 'lh':
-            label.vertices = stc.vertno[0][inds]
+            label.vertices = stc.vertices[0][inds]
         else:
-            label.vertices = stc.vertno[1][inds]
+            label.vertices = stc.vertices[1][inds]
         label.subject = subject_to
         return label
 
@@ -598,28 +627,91 @@ class Label(object):
         """
         return split_label(self, parts, subject, subjects_dir, freesurfer)
 
+    def get_vertices_used(self, vertices=None):
+        """Get the source space's vertices inside the label
+
+        Parameters
+        ----------
+        vertices : ndarray of int, shape (n_vertices,) | None
+            The set of vertices to compare the label to. If None, equals to
+            ``np.arange(10242)``. Defaults to None.
+
+        Returns
+        -------
+        label_verts : ndarray of int, shape (n_label_vertices,)
+            The vertices of the label that are used by the data.
+        """
+        if vertices is None:
+            vertices = np.arange(10242)
+
+        label_verts = vertices[in1d(vertices, self.vertices)]
+        return label_verts
+
+    def get_tris(self, tris, vertices=None):
+        """Get the source space's triangles inside the label
+
+        Parameters
+        ----------
+        tris : ndarray of int, shape (n_tris, 3)
+            The set of triangles corresponding to the vertices in a
+            source space.
+        vertices : ndarray of int, shape (n_vertices,) | None
+            The set of vertices to compare the label to. If None, equals to
+            ``np.arange(10242)``. Defaults to None.
+
+        Returns
+        -------
+        label_tris : ndarray of int, shape (n_tris, 3)
+            The subset of tris used by the label.
+        """
+        vertices_ = self.get_vertices_used(vertices)
+        selection = np.all(in1d(tris, vertices_).reshape(tris.shape),
+                           axis=1)
+        label_tris = tris[selection]
+        if len(np.unique(label_tris)) < len(vertices_):
+            logger.info('Surprising label structure. Trying to repair '
+                        'triangles.')
+            dropped_vertices = np.setdiff1d(vertices_, label_tris)
+            n_dropped = len(dropped_vertices)
+            assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
+
+            #  put missing vertices as extra zero-length triangles
+            add_tris = (dropped_vertices +
+                        np.zeros((len(dropped_vertices), 3), dtype=int).T)
+
+            label_tris = np.r_[label_tris, add_tris.T]
+            assert len(np.unique(label_tris)) == len(vertices_)
+
+        return label_tris
+
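# A numeric sketch of the repair branch in get_tris above: a vertex covered
# by no selected triangle is appended as a degenerate (zero-area) triangle so
# that every used vertex appears in label_tris.
import numpy as np

vertices_ = np.array([0, 1, 2, 3])
label_tris = np.array([[0, 1, 2]])                    # vertex 3 is dropped
dropped = np.setdiff1d(vertices_, label_tris)
add_tris = dropped + np.zeros((len(dropped), 3), dtype=int).T
label_tris = np.r_[label_tris, add_tris.T]            # appends [3, 3, 3]
print(label_tris)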
 
 class BiHemiLabel(object):
     """A freesurfer/MNE label with vertices in both hemispheres
 
     Parameters
     ----------
-    lh, rh : Label
-        Label objects representing the left and the right hemisphere,
-        respectively
+    lh : Label
+        Label for the left hemisphere.
+    rh : Label
+        Label for the right hemisphere.
     name : None | str
         name for the label
+    color : None | matplotlib color
+        Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
+        Note that due to file specification limitations, the color isn't saved
+        to or loaded from files written to disk.
 
     Attributes
     ----------
-    lh, rh : Label
-        Labels for the left and right hemisphere, respectively.
+    lh : Label
+        Label for the left hemisphere.
+    rh : Label
+        Label for the right hemisphere.
     name : None | str
         A name for the label. It is OK to change that attribute manually.
     subject : str | None
         Subject the label is from.
     """
-    hemi = 'both'
 
     def __init__(self, lh, rh, name=None, color=None):
         if lh.subject != rh.subject:
@@ -630,6 +722,7 @@ class BiHemiLabel(object):
         self.name = name
         self.subject = lh.subject
         self.color = color
+        self.hemi = 'both'
 
     def __repr__(self):
         temp = "<BiHemiLabel  |  %s, lh : %i vertices,  rh : %i vertices>"
@@ -658,6 +751,28 @@ class BiHemiLabel(object):
         color = _blend_colors(self.color, other.color)
         return BiHemiLabel(lh, rh, name, color)
 
+    def __sub__(self, other):
+        if isinstance(other, Label):
+            if other.hemi == 'lh':
+                lh = self.lh - other
+                rh = self.rh
+            else:
+                rh = self.rh - other
+                lh = self.lh
+        elif isinstance(other, BiHemiLabel):
+            lh = self.lh - other.lh
+            rh = self.rh - other.rh
+        else:
+            raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
+
+        if len(lh.vertices) == 0:
+            return rh
+        elif len(rh.vertices) == 0:
+            return lh
+        else:
+            name = '%s - %s' % (self.name, other.name)
+            return BiHemiLabel(lh, rh, name, self.color)
+
 
 def read_label(filename, subject=None, color=None):
     """Read FreeSurfer Label file
@@ -681,10 +796,11 @@ def read_label(filename, subject=None, color=None):
     -------
     label : Label
         Instance of Label object with attributes:
-            comment        comment from the first line of the label file
-            vertices       vertex indices (0 based, column 1)
-            pos            locations in meters (columns 2 - 4 divided by 1000)
-            values         values at the vertices (column 5)
+
+            - ``comment``: comment from the first line of the label file
+            - ``vertices``: vertex indices (0 based, column 1)
+            - ``pos``: locations in meters (columns 2 - 4 divided by 1000)
+            - ``values``: values at the vertices (column 5)
 
     See Also
     --------
@@ -705,10 +821,9 @@ def read_label(filename, subject=None, color=None):
 
     # find name
     if basename.startswith(('lh.', 'rh.')):
+        basename_ = basename[3:]
         if basename.endswith('.label'):
-            basename_ = basename[3:-6]
-        else:
-            basename_ = basename[3:]
+            basename_ = basename_[:-6]
     else:
         basename_ = basename[:-9]
     name = "%s-%s" % (basename_, hemi)
@@ -842,7 +957,7 @@ def split_label(label, parts=2, subject=None, subjects_dir=None,
         raise ValueError("Can't split label into %i parts" % n_parts)
 
     # find the subject
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     if label.subject is None and subject is None:
         raise ValueError("The subject needs to be specified.")
     elif subject is None:
@@ -850,9 +965,9 @@ def split_label(label, parts=2, subject=None, subjects_dir=None,
     elif label.subject is None:
         pass
     elif subject != label.subject:
-        err = ("The label specifies a different subject (%r) from the subject "
-               "parameter (%r)." % label.subject, subject)
-        raise ValueError(err)
+        raise ValueError("The label specifies a different subject (%r) from "
+                         "the subject parameter (%r)."
+                         % label.subject, subject)
 
     # find the spherical surface
     surf_fname = '.'.join((label.hemi, 'sphere'))
@@ -928,42 +1043,6 @@ def split_label(label, parts=2, subject=None, subjects_dir=None,
     return labels
 
 
-def label_time_courses(labelfile, stcfile):
-    """Extract the time courses corresponding to a label file from an stc file
-
-    Parameters
-    ----------
-    labelfile : string
-        Path to the label file.
-    stcfile : string
-        Path to the stc file. The name of the stc file (must be on the
-        same subject and hemisphere as the stc file).
-
-    Returns
-    -------
-    values : 2d array
-        The time courses.
-    times : 1d array
-        The time points.
-    vertices : array
-        The indices of the vertices corresponding to the time points.
-    """
-    stc = _read_stc(stcfile)
-    lab = read_label(labelfile)
-
-    vertices = np.intersect1d(stc['vertices'], lab.vertices)
-    idx = [k for k in range(len(stc['vertices']))
-           if stc['vertices'][k] in vertices]
-
-    if len(vertices) == 0:
-        raise ValueError('No vertices match the label in the stc file')
-
-    values = stc['data'][idx]
-    times = stc['tmin'] + stc['tstep'] * np.arange(stc['data'].shape[1])
-
-    return values, times, vertices
-
-
 def label_sign_flip(label, src):
     """Compute sign for label averaging
 
@@ -989,12 +1068,12 @@ def label_sign_flip(label, src):
     if label.hemi == 'lh':
         vertno_sel = np.intersect1d(lh_vertno, label.vertices)
         if len(vertno_sel) == 0:
-            return np.array([])
+            return np.array([], int)
         ori = src[0]['nn'][vertno_sel]
     elif label.hemi == 'rh':
         vertno_sel = np.intersect1d(rh_vertno, label.vertices)
         if len(vertno_sel) == 0:
-            return np.array([])
+            return np.array([], int)
         ori = src[1]['nn'][vertno_sel]
     else:
         raise Exception("Unknown hemisphere type")
@@ -1006,7 +1085,7 @@ def label_sign_flip(label, src):
     return flip
 
 
-def stc_to_label(stc, src=None, smooth=None, connected=False,
+def stc_to_label(stc, src=None, smooth=True, connected=False,
                  subjects_dir=None):
     """Compute a label from the non-zero sources in an stc object.
 
@@ -1021,10 +1100,7 @@ def stc_to_label(stc, src=None, smooth=None, connected=False,
     smooth : bool
         Fill in vertices on the cortical surface that are not in the source
         space based on the closest source space vertex (requires
-        src to be a SourceSpace). The default is currently to smooth with a
-        deprecated method, and will change to True in v0.9 (i.e., the parameter
-        should be explicitly specified as boolean until then to avoid a
-        deprecation warning).
+        src to be a SourceSpace).
     connected : bool
         If True a list of connected labels will be returned in each
         hemisphere. The labels are ordered in decreasing order depending
@@ -1042,6 +1118,9 @@ def stc_to_label(stc, src=None, smooth=None, connected=False,
         ordered in decreasing order depending of the maximum value in the stc.
         If no Label is available in an hemisphere, an empty list is returned.
     """
+    if not isinstance(smooth, bool):
+        raise ValueError('smooth should be True or False. Got %s.' % smooth)
+
     src = stc.subject if src is None else src
     if src is None:
         raise ValueError('src cannot be None if stc.subject is None')
@@ -1053,35 +1132,18 @@ def stc_to_label(stc, src=None, smooth=None, connected=False,
     if not isinstance(stc, SourceEstimate):
         raise ValueError('SourceEstimate should be surface source estimates')
 
-    if not isinstance(smooth, bool):
-        if smooth is None:
-            msg = ("The smooth parameter was not explicitly specified. The "
-                   "default behavior of stc_to_label() will change in v0.9 "
-                   "to filling the label using source space patch "
-                   "information. In order to avoid this warning, set smooth "
-                   "to a boolean explicitly.")
-            smooth = 5
-        else:
-            msg = ("The smooth parameter of stc_to_label() was specified as "
-                   "int. This value is deprecated and will raise an error in "
-                   "v0.9. In order to avoid this warning, set smooth to a "
-                   "boolean.")
-        warn(msg, DeprecationWarning)
-
     if isinstance(src, string_types):
         if connected:
             raise ValueError('The option to return only connected labels is '
                              'only available if source spaces are provided.')
-        if isinstance(smooth, bool) and smooth:
-            msg = ("stc_to_label with smooth='patch' requires src to be an "
+        if smooth:
+            msg = ("stc_to_label with smooth=True requires src to be an "
                    "instance of SourceSpace")
             raise ValueError(msg)
-        subjects_dir = get_subjects_dir(subjects_dir)
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
         surf_path_from = op.join(subjects_dir, src, 'surf')
-        rr_lh, tris_lh = read_surface(op.join(surf_path_from,
-                                      'lh.white'))
-        rr_rh, tris_rh = read_surface(op.join(surf_path_from,
-                                      'rh.white'))
+        rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
+        rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
         rr = [rr_lh, rr_rh]
         tris = [tris_lh, tris_rh]
     else:
@@ -1097,7 +1159,7 @@ def stc_to_label(stc, src=None, smooth=None, connected=False,
     cnt = 0
     cnt_full = 0
     for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
-            zip(['lh', 'rh'], stc.vertno, tris, rr)):
+            zip(['lh', 'rh'], stc.vertices, tris, rr)):
         this_data = stc.data[cnt:cnt + len(this_vertno)]
         e = mesh_edges(this_tris)
         e.data[e.data == 2] = 1
@@ -1141,19 +1203,11 @@ def stc_to_label(stc, src=None, smooth=None, connected=False,
             colors = _n_colors(len(clusters))
             for c, color in zip(clusters, colors):
                 idx_use = c
-                if isinstance(smooth, bool) and smooth:
-                    label = Label(idx_use, this_rr[idx_use], None, hemi,
-                                  'Label from stc', subject=subject,
-                                  color=color).fill(src)
-                else:
-                    for k in range(smooth):
-                        e_use = e[:, idx_use]
-                        data1 = e_use * np.ones(len(idx_use))
-                        idx_use = np.where(data1)[0]
-
-                    label = Label(idx_use, this_rr[idx_use], None, hemi,
-                                  'Label from stc', subject=subject,
-                                  color=color)
+                label = Label(idx_use, this_rr[idx_use], None, hemi,
+                              'Label from stc', subject=subject,
+                              color=color)
+                if smooth:
+                    label = label.fill(src)
 
                 this_labels.append(label)
 
@@ -1242,7 +1296,7 @@ def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
 
 
 def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
-                overlap=True, names=None):
+                overlap=True, names=None, surface='white'):
     """Generate circular labels in source space with region growing
 
     This function generates a number of labels in source space by growing
@@ -1279,6 +1333,8 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
     names : None | list of str
         Assign names to the new labels (list needs to have the same length as
         seeds).
+    surface : string
+        The surface used to grow the labels, defaults to the white surface.
 
     Returns
     -------
@@ -1287,7 +1343,7 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
         vertex and extent; the ``values``  attribute contains distance from the
         seed in millimeters
     """
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     n_jobs = check_n_jobs(n_jobs)
 
     # make sure the inputs are arrays
@@ -1332,7 +1388,8 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
     # load the surfaces and create the distance graphs
     tris, vert, dist = {}, {}, {}
     for hemi in set(hemis):
-        surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.white')
+        surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
+                             surface)
         vert[hemi], tris[hemi] = read_surface(surf_fname)
         dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
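# A sketch of how the new `surface` parameter above resolves the geometry
# file (paths here are hypothetical): surface='inflated' loads lh.inflated
# instead of the default lh.white.
import os.path as op

subjects_dir, subject = '/data/subjects', 'sample'
for surface in ('white', 'inflated'):
    print(op.join(subjects_dir, subject, 'surf', 'lh' + '.' + surface))
# /data/subjects/sample/surf/lh.white
# /data/subjects/sample/surf/lh.inflated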
 
@@ -1489,11 +1546,11 @@ def _read_annot(fname):
             n_entries = np.fromfile(fid, '>i4', 1)[0]
             ctab = np.zeros((n_entries, 5), np.int)
             length = np.fromfile(fid, '>i4', 1)[0]
-            _ = np.fromfile(fid, "|S%d" % length, 1)[0]  # Orig table path
+            np.fromfile(fid, "|S%d" % length, 1)  # Orig table path
             entries_to_read = np.fromfile(fid, '>i4', 1)[0]
             names = list()
             for i in range(entries_to_read):
-                _ = np.fromfile(fid, '>i4', 1)[0]  # Structure
+                np.fromfile(fid, '>i4', 1)  # Structure
                 name_length = np.fromfile(fid, '>i4', 1)[0]
                 name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
                 names.append(name)
@@ -1525,31 +1582,14 @@ def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
         else:
             hemis = [hemi]
 
-        annot_fname = list()
-        for hemi in hemis:
-            fname = op.join(subjects_dir, subject, 'label',
-                            '%s.%s.annot' % (hemi, parc))
-            annot_fname.append(fname)
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+        dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
+        annot_fname = [dst % hemi_ for hemi_ in hemis]
 
     return annot_fname, hemis
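# A sketch of the template trick in the rewritten _get_annot_fname above:
# '%%s' survives the first substitution as a literal '%s', leaving a
# per-hemisphere placeholder (subjects_dir and subject are hypothetical).
import os.path as op

parc = 'aparc'
dst = op.join('/data/subjects', 'sample', 'label', '%%s.%s.annot' % parc)
print([dst % hemi_ for hemi_ in ('lh', 'rh')])
# ['/data/subjects/sample/label/lh.aparc.annot',
#  '/data/subjects/sample/label/rh.aparc.annot']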
 
 
 @verbose
- at deprecated("labels_from_parc() will be removed in release 0.9. Use "
-            "read_labels_from_annot() instead (note the change in return "
-            "values).")
-def labels_from_parc(subject, parc='aparc', hemi='both', surf_name='white',
-                     annot_fname=None, regexp=None, subjects_dir=None,
-                     verbose=None):
-    """Deprecated (will be removed in mne 0.9). Use read_labels_from_annot()
-    instead"""
-    labels = read_labels_from_annot(subject, parc, hemi, surf_name,
-                                    annot_fname, regexp, subjects_dir, verbose)
-    label_colors = [l.color for l in labels]
-    return labels, label_colors
-
-
- at verbose
 def read_labels_from_annot(subject, parc='aparc', hemi='both',
                            surf_name='white', annot_fname=None, regexp=None,
                            subjects_dir=None, verbose=None):
@@ -1696,37 +1736,9 @@ def _write_annot(fname, annot, ctab, names):
 
 
 @verbose
- at deprecated("parc_from_labels() will be removed in release 0.9. Use "
-            "write_labels_to_annot() instead (note the change in the function "
-            "signature).")
-def parc_from_labels(labels, colors=None, subject=None, parc=None,
-                     annot_fname=None, overwrite=False, subjects_dir=None,
-                     verbose=None):
-    """Deprecated (will be removed in mne 0.9). Use write_labels_to_annot()
-    instead"""
-    if colors is not None:
-        # do some input checking
-        colors = np.asarray(colors)
-        if colors.shape[1] != 4:
-            raise ValueError('Each color must have 4 values')
-        if len(colors) != len(labels):
-            raise ValueError('colors must have the same length as labels')
-        if np.any(colors < 0) or np.any(colors > 1):
-            raise ValueError('color values must be between 0 and 1')
-
-        # assign colors to labels
-        labels = [label.copy() for label in labels]
-        for label, color in zip(labels, colors):
-            label.color = color
-
-    write_labels_to_annot(labels, subject, parc, overwrite, subjects_dir,
-                          annot_fname, verbose)
-
-
- at verbose
 def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
                           subjects_dir=None, annot_fname=None,
-                          colormap='hsv', verbose=None):
+                          colormap='hsv', hemi='both', verbose=None):
     """Create a FreeSurfer annotation from a list of labels
 
     Parameters
@@ -1747,15 +1759,23 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
     colormap : str
         Colormap to use to generate label colors for labels that do not
         have a color specified.
+    hemi : 'both' | 'lh' | 'rh'
+        The hemisphere(s) for which to write \*.annot files (only applies if
+        annot_fname is not specified; default is 'both').
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    Vertices that are not covered by any of the labels are assigned to a label
+    named "unknown".
     """
     logger.info('Writing labels to parcellation..')
 
     subjects_dir = get_subjects_dir(subjects_dir)
 
     # get the .annot filenames and hemispheres
-    annot_fname, hemis = _get_annot_fname(annot_fname, subject, 'both', parc,
+    annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
                                           subjects_dir)
 
     if not overwrite:
@@ -1775,58 +1795,61 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
     for hemi, fname in zip(hemis, annot_fname):
         hemi_labels = [label for label in labels if label.hemi == hemi]
         n_hemi_labels = len(hemi_labels)
+
         if n_hemi_labels == 0:
-            # no labels for this hemisphere
-            continue
-        hemi_labels.sort(key=lambda label: label.name)
-
-        # convert colors to 0-255 RGBA tuples
-        hemi_colors = [no_color if label.color is None else
-                       tuple(int(round(255 * i)) for i in label.color)
-                       for label in hemi_labels]
-        ctab = np.array(hemi_colors, dtype=np.int32)
-        ctab_rgb = ctab[:, :3]
-
-        # make dict to check label colors (for annot ID only R, G and B count)
-        labels_by_color = defaultdict(list)
-        for label, color in zip(hemi_labels, ctab_rgb):
-            labels_by_color[tuple(color)].append(label.name)
-
-        # check label colors
-        for color, names in labels_by_color.items():
-            if color == no_color_rgb:
-                continue
+            ctab = np.empty((0, 4), dtype=np.int32)
+            ctab_rgb = ctab[:, :3]
+        else:
+            hemi_labels.sort(key=lambda label: label.name)
+
+            # convert colors to 0-255 RGBA tuples
+            hemi_colors = [no_color if label.color is None else
+                           tuple(int(round(255 * i)) for i in label.color)
+                           for label in hemi_labels]
+            ctab = np.array(hemi_colors, dtype=np.int32)
+            ctab_rgb = ctab[:, :3]
+
+            # make color dict (for annot ID, only R, G and B count)
+            labels_by_color = defaultdict(list)
+            for label, color in zip(hemi_labels, ctab_rgb):
+                labels_by_color[tuple(color)].append(label.name)
+
+            # check label colors
+            for color, names in labels_by_color.items():
+                if color == no_color_rgb:
+                    continue
 
-            if color == (0, 0, 0):
-                # we cannot have an all-zero color, otherw. e.g. tksurfer
-                # refuses to read the parcellation
-                msg = ('    At least one label contains a color with, "r=0, '
-                       'g=0, b=0" value. Some FreeSurfer tools may fail to '
-                       'read the parcellation')
-                logger.warning(msg)
-
-            if any(i > 255 for i in color):
-                msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
-                invalid_colors.append(msg)
-
-            if len(names) > 1:
-                msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
-                duplicate_colors.append(msg)
-
-        # replace None values (labels with unspecified color)
-        if labels_by_color[no_color_rgb]:
-            default_colors = _n_colors(n_hemi_labels, bytes_=True,
-                                       cmap=colormap)
-            safe_color_i = 0  # keep track of colors known to be in hemi_colors
-            for i in xrange(n_hemi_labels):
-                if ctab[i, 0] == -1:
-                    color = default_colors[i]
-                    # make sure to add no duplicate color
-                    while np.any(np.all(color[:3] == ctab_rgb, 1)):
-                        color = default_colors[safe_color_i]
-                        safe_color_i += 1
-                    # assign the color
-                    ctab[i] = color
+                if color == (0, 0, 0):
+                    # we cannot have an all-zero color, otherwise e.g.
+                    # tksurfer refuses to read the parcellation
+                    msg = ('At least one label contains a color with an '
+                           '"r=0, g=0, b=0" value. Some FreeSurfer tools '
+                           'may fail to read the parcellation')
+                    logger.warning(msg)
+
+                if any(i > 255 for i in color):
+                    msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
+                    invalid_colors.append(msg)
+
+                if len(names) > 1:
+                    msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
+                    duplicate_colors.append(msg)
+
+            # replace None values (labels with unspecified color)
+            if labels_by_color[no_color_rgb]:
+                default_colors = _n_colors(n_hemi_labels, bytes_=True,
+                                           cmap=colormap)
+                # keep track of colors known to be in hemi_colors:
+                safe_color_i = 0
+                for i in xrange(n_hemi_labels):
+                    if ctab[i, 0] == -1:
+                        color = default_colors[i]
+                        # make sure to add no duplicate color
+                        while np.any(np.all(color[:3] == ctab_rgb, 1)):
+                            color = default_colors[safe_color_i]
+                            safe_color_i += 1
+                        # assign the color
+                        ctab[i] = color
 
         # find number of vertices in surface
         if subject is not None and subjects_dir is not None:
@@ -1835,8 +1858,11 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
             points, _ = read_surface(fpath)
             n_vertices = len(points)
         else:
-            max_vert = max(np.max(label.vertices) for label in hemi_labels)
-            n_vertices = max_vert + 1
+            if len(hemi_labels) > 0:
+                max_vert = max(np.max(label.vertices) for label in hemi_labels)
+                n_vertices = max_vert + 1
+            else:
+                n_vertices = 1
             msg = ('    Number of vertices in the surface could not be '
                    'verified because the surface file could not be found; '
                    'specify subject and subjects_dir parameters.')
@@ -1863,6 +1889,13 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
 
         hemi_names = [label.name for label in hemi_labels]
 
+        if None in hemi_names:
+            msg = ("Found %i labels with no name. Writing annotation file"
+                   "requires all labels named" % (hemi_names.count(None)))
+            # raise the error immediately rather than crash with an
+            # uninformative error later (e.g. cannot join NoneType)
+            raise ValueError(msg)
+
         # Assign unlabeled vertices to an "unknown" label
         unlabeled = (annot == -1)
         if np.any(unlabeled):
diff --git a/mne/layouts/__init__.py b/mne/layouts/__init__.py
deleted file mode 100644
index 58cbdf4..0000000
--- a/mne/layouts/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .layout import (Layout, make_eeg_layout, make_grid_layout, read_layout,
-                     find_layout)
diff --git a/mne/layouts/layout.py b/mne/layouts/layout.py
deleted file mode 100644
index 4706576..0000000
--- a/mne/layouts/layout.py
+++ /dev/null
@@ -1,563 +0,0 @@
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Denis Engemann <denis.engemann at gmail.com>
-#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Eric Larson <larson.eric.d at gmail.com>
-#
-# License: Simplified BSD
-
-import warnings
-from collections import defaultdict
-import os.path as op
-import numpy as np
-from scipy.optimize import leastsq
-from ..preprocessing.maxfilter import fit_sphere_to_headshape
-from .. import pick_types
-from ..io.constants import FIFF
-from ..utils import _clean_names
-from ..externals.six.moves import map
-
-
-class Layout(object):
-    """Sensor layouts
-
-    Layouts are typically loaded from a file using read_layout. Only use this
-    class directly if you're constructing a new layout.
-
-    Parameters
-    ----------
-    box : tuple of length 4
-        The box dimension (x_min, x_max, y_min, y_max)
-    pos : array, shape=(n_channels, 4)
-        The positions of the channels in 2d (x, y, width, height)
-    names : list
-        The channel names
-    ids : list
-        The channel ids
-    kind : str
-        The type of Layout (e.g. 'Vectorview-all')
-    """
-    def __init__(self, box, pos, names, ids, kind):
-        self.box = box
-        self.pos = pos
-        self.names = names
-        self.ids = ids
-        self.kind = kind
-
-    def save(self, fname):
-        """Save Layout to disk
-
-        Parameters
-        ----------
-        fname : str
-            The file name (e.g. 'my_layout.lout')
-        """
-        x = self.pos[:, 0]
-        y = self.pos[:, 1]
-        width = self.pos[:, 2]
-        height = self.pos[:, 3]
-        if fname.endswith('.lout'):
-            out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
-        elif fname.endswith('.lay'):
-            out_str = ''
-        else:
-            raise ValueError('Unknown layout type. Should be of type '
-                             '.lout or .lay.')
-
-        for ii in range(x.shape[0]):
-            out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
-                        x[ii], y[ii], width[ii], height[ii], self.names[ii]))
-
-        f = open(fname, 'w')
-        f.write(out_str)
-        f.close()
-
-    def __repr__(self):
-        return '<Layout | %s - Channels: %s ...>' % (self.kind,
-                                                     ', '.join(self.names[:3]))
-
-
-def _read_lout(fname):
-    """Aux function"""
-    with open(fname) as f:
-        box_line = f.readline()  # first line contains box dimension
-        box = tuple(map(float, box_line.split()))
-        names, pos, ids = [], [], []
-        for line in f:
-            splits = line.split()
-            if len(splits) == 7:
-                cid, x, y, dx, dy, chkind, nb = splits
-                name = chkind + ' ' + nb
-            else:
-                cid, x, y, dx, dy, name = splits
-            pos.append(np.array([x, y, dx, dy], dtype=np.float))
-            names.append(name)
-            ids.append(int(cid))
-
-    pos = np.array(pos)
-
-    return box, pos, names, ids
-
-
-def _read_lay(fname):
-    """Aux function"""
-    with open(fname) as f:
-        box = None
-        names, pos, ids = [], [], []
-        for line in f:
-            splits = line.split()
-            cid, x, y, dx, dy, name = splits
-            pos.append(np.array([x, y, dx, dy], dtype=np.float))
-            names.append(name)
-            ids.append(int(cid))
-
-    pos = np.array(pos)
-
-    return box, pos, names, ids
-
-
-def read_layout(kind, path=None, scale=True):
-    """Read layout from a file
-
-    Parameters
-    ----------
-    kind : str
-        The name of the .lout file (e.g. kind='Vectorview-all' for
-        'Vectorview-all.lout')
-
-    path : str | None
-        The path of the folder containing the Layout file
-
-    scale : bool
-        Apply useful scaling for out-of-the-box plotting using layout.pos
-
-    Returns
-    -------
-    layout : instance of Layout
-        The layout
-    """
-    if path is None:
-        path = op.dirname(__file__)
-
-    if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
-        kind += '.lout'
-    elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
-        kind += '.lay'
-
-    if kind.endswith('.lout'):
-        fname = op.join(path, kind)
-        kind = kind[:-5]
-        box, pos, names, ids = _read_lout(fname)
-    elif kind.endswith('.lay'):
-        fname = op.join(path, kind)
-        kind = kind[:-4]
-        box, pos, names, ids = _read_lay(fname)
-        kind.endswith('.lay')
-    else:
-        raise ValueError('Unknown layout type. Should be of type '
-                         '.lout or .lay.')
-
-    if scale:
-        pos[:, 0] -= np.min(pos[:, 0])
-        pos[:, 1] -= np.min(pos[:, 1])
-        scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
-        pos /= scaling
-        pos[:, :2] += 0.03
-        pos[:, :2] *= 0.97 / 1.03
-        pos[:, 2:] *= 0.94
-
-    return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
-
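# A sketch of the scale=True normalization in read_layout above (the module
# is deleted here; the equivalent logic moves under mne.channels in 0.10):
# shift positions to the origin, normalize by the largest extent plus one
# box width, then pad everything into the unit square.
import numpy as np

pos = np.array([[1., 2., .4, .3],
                [3., 5., .4, .3]])
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
print(pos.round(3))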
-
-def make_eeg_layout(info, radius=20, width=5, height=4):
-    """Create .lout file from EEG electrode digitization
-
-    Parameters
-    ----------
-    info : dict
-        Measurement info (e.g., raw.info)
-    radius : float
-        Viewport radius
-    width : float
-        Viewport width
-    height : float
-        Viewport height
-
-    Returns
-    -------
-    layout : Layout
-        The generated Layout
-    """
-    if info['dig'] in [[], None]:
-        raise RuntimeError('Did not find any digitization points in the info. '
-                           'Cannot generate layout based on the subject\'s '
-                           'head shape')
-
-    radius_head, origin_head, origin_device = fit_sphere_to_headshape(info)
-    inds = pick_types(info, meg=False, eeg=True, ref_meg=False,
-                      exclude='bads')
-    hsp = [info['chs'][ii]['eeg_loc'][:, 0] for ii in inds]
-    names = [info['chs'][ii]['ch_name'] for ii in inds]
-    if len(hsp) <= 0:
-        raise ValueError('No EEG digitization points found')
-
-    if not len(hsp) == len(names):
-        raise ValueError("Channel names don't match digitization values")
-    hsp = np.array(hsp)
-
-    # Move points to origin
-    hsp -= origin_head / 1e3  # convert origin from mm to meters
-
-    # Calculate angles
-    r = np.sqrt(np.sum(hsp ** 2, axis=-1))
-    theta = np.arccos(hsp[:, 2] / r)
-    phi = np.arctan2(hsp[:, 1], hsp[:, 0])
-
-    # Mark the points that might have caused bad angle estimates
-    iffy = np.nonzero(np.sqrt(np.sum(hsp[:, :2] ** 2, axis=-1))
-                      < np.finfo(np.float).eps * 10)
-    theta[iffy] = 0
-    phi[iffy] = 0
-
-    # Do the azimuthal equidistant projection
-    x = radius * (2.0 * theta / np.pi) * np.cos(phi)
-    y = radius * (2.0 * theta / np.pi) * np.sin(phi)
-
-    n_channels = len(x)
-    pos = np.c_[x, y, width * np.ones(n_channels),
-                height * np.ones(n_channels)]
-
-    box = (x.min() - 0.1 * width, x.max() + 1.1 * width,
-           y.min() - 0.1 * width, y.max() + 1.1 * height)
-    ids = 1 + np.arange(n_channels)
-    layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
-    return layout
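# A worked check of the azimuthal equidistant projection used above: a point
# at the +z pole (theta = 0) maps to the origin, while a point on the
# equator (theta = pi/2) maps to the full viewport radius.
import numpy as np

radius = 20.0
hsp = np.array([[0., 0., 1.],    # pole
                [1., 0., 0.]])   # equator, along +x
r = np.sqrt(np.sum(hsp ** 2, axis=-1))
theta = np.arccos(hsp[:, 2] / r)
phi = np.arctan2(hsp[:, 1], hsp[:, 0])
x = radius * (2.0 * theta / np.pi) * np.cos(phi)
y = radius * (2.0 * theta / np.pi) * np.sin(phi)
print(np.c_[x, y].round(6))  # [[ 0.  0.] [20.  0.]]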
-
-
-def make_grid_layout(info, picks=None):
-    """ Generate .lout file for custom data, i.e., ICA sources
-
-    Parameters
-    ----------
-    info : dict
-        Measurement info (e.g., raw.info). If None, default names will be
-        employed.
-    picks : array-like of int | None
-        The indices of the channels to be included. If None, all misc channels
-        will be included.
-
-    Returns
-    -------
-    layout : Layout
-        The generated layout.
-    """
-    if picks is None:
-        picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
-
-    names = [info['chs'][k]['ch_name'] for k in picks]
-
-    if not names:
-        raise ValueError('No misc data channels found.')
-
-    ids = list(range(len(picks)))
-    size = len(picks)
-
-    # prepare square-like layout
-    ht = wd = np.sqrt(size)  # try square
-    if wd % 1:
-        wd, ht = int(wd + 1), int(ht)  # try n * (n-1) rectangle
-
-    if wd * ht < size:  # jump to the next full square
-        ht += 1
-
-    # setup position grid and fill up
-    x, y = np.meshgrid(np.linspace(0, 1, wd), np.linspace(0, 1, ht))
-
-    # scale boxes depending on size such that square is always filled
-    width = size * .15  # value depends on mne default full-view size
-    spacing = (width * ht)
-
-    # XXX : width and height are here assumed to be equal. Could be improved.
-    x, y = (x.ravel()[:size] * spacing, y.ravel()[:size] * spacing)
-
-    # calculate pos
-    pos = np.c_[x, y, width * np.ones(size), width * np.ones(size)]
-
-    # calculate box
-    box = (x.min() - 0.1 * width, x.max() + 1.1 * width,
-           y.min() - 0.1 * width, y.max() + 1.1 * width)
-
-    layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
-    return layout
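# A quick trace of the near-square sizing above: try an n x n square, fall
# back to n x (n - 1), and bump the height when that still cannot hold all
# channels.
import numpy as np

for size in (9, 12, 7):
    ht = wd = np.sqrt(size)
    if wd % 1:
        wd, ht = int(wd + 1), int(ht)
    if wd * ht < size:
        ht += 1
    print('%d -> %d x %d' % (size, wd, ht))
# 9 -> 3 x 3, 12 -> 4 x 3, 7 -> 3 x 3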
-
-
-def find_layout(info=None, ch_type=None, chs=None):
-    """Choose a layout based on the channels in the info 'chs' field
-
-    Parameters
-    ----------
-    info : instance of mne.io.meas_info.Info | None
-        The measurement info.
-    ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
-        The channel type for selecting single channel layouts.
-        Defaults to None. Note, this argument will only be considered for
-        VectorView type layout. Use `meg` to force using the full layout
-        in situations where the info contains only one sensor type.
-    chs : instance of mne.io.meas_info.Info | None
-        The measurement info. Defaults to None. This keyword is deprecated and
-        will be removed in MNE-Python 0.9. Use `info` instead.
-
-    Returns
-    -------
-    layout : Layout instance | None
-        None if layout not found.
-    """
-    msg = ("The 'chs' argument is deprecated and will be "
-           "removed in MNE-Python 0.9 Please use "
-           "'info' instead to pass the measurement info")
-    if chs is not None:
-        warnings.warn(msg, DeprecationWarning)
-    elif isinstance(info, list):
-        warnings.warn(msg, DeprecationWarning)
-        chs = info
-    else:
-        chs = info.get('chs')
-    if not chs:
-        raise ValueError('Could not find any channels. The info structure '
-                         'is not valid.')
-
-    our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`'])
-    if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
-        raise ValueError('Invalid channel type (%s) requested '
-                         '`ch_type` must be %s' % (ch_type, our_types))
-
-    coil_types = set([ch['coil_type'] for ch in chs])
-    channel_types = set([ch['kind'] for ch in chs])
-
-    has_vv_mag = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_MAG_T1,
-                                                FIFF.FIFFV_COIL_VV_MAG_T2,
-                                                FIFF.FIFFV_COIL_VV_MAG_T3]])
-    has_vv_grad = any([k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
-                                                 FIFF.FIFFV_COIL_VV_PLANAR_T2,
-                                                 FIFF.FIFFV_COIL_VV_PLANAR_T3]])
-    has_vv_meg = has_vv_mag and has_vv_grad
-    has_vv_only_mag = has_vv_mag and not has_vv_grad
-    has_vv_only_grad = has_vv_grad and not has_vv_mag
-    is_old_vv = ' ' in chs[0]['ch_name']
-
-    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
-    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
-                       FIFF.FIFFV_COIL_CTF_REF_GRAD,
-                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
-    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
-                    (FIFF.FIFFV_MEG_CH in channel_types and
-                     any([k in ctf_other_types for k in coil_types])))
-                    # hack due to MNE-C bug in IO of CTF
-    n_kit_grads = len([ch for ch in chs
-                       if ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD])
-
-    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad])
-    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
-                     FIFF.FIFFV_EEG_CH in channel_types)
-    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
-    has_eeg_coils_only = has_eeg_coils and not has_any_meg
-
-    if ch_type == "meg" and not has_any_meg:
-        raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
-
-    if ch_type == "eeg" and not has_eeg_coils:
-        raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
-
-    if ((has_vv_meg and ch_type is None) or
-        (any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
-        layout_name = 'Vectorview-all'
-    elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
-        layout_name = 'Vectorview-mag'
-    elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
-        layout_name = 'Vectorview-grad'
-    elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
-          (has_eeg_coils_and_meg and ch_type == 'eeg')):
-        if not isinstance(info, dict):
-            raise RuntimeError('Cannot make EEG layout, no measurement info '
-                               'was passed to `find_layout`')
-        return make_eeg_layout(info)
-    elif has_4D_mag:
-        layout_name = 'magnesWH3600'
-    elif has_CTF_grad:
-        layout_name = 'CTF-275'
-    elif n_kit_grads == 157:
-        layout_name = 'KIT-157'
-    else:
-        return None
-
-    layout = read_layout(layout_name)
-    if not is_old_vv:
-        layout.names = _clean_names(layout.names, remove_whitespace=True)
-    if has_CTF_grad:
-        layout.names = _clean_names(layout.names, before_dash=True)
-
-    return layout
-
-
-def _find_topomap_coords(chs, layout=None):
-    """Try to guess the E/MEG layout and return appropriate topomap coordinates
-
-    Parameters
-    ----------
-    chs : list
-        A list of channels as contained in the info['chs'] entry.
-    layout : None | instance of Layout
-        Enforce using a specific layout. With None, a new map is generated.
-        With None, a layout is chosen based on the channels in the chs
-        parameter.
-
-    Returns
-    -------
-    coords : array, shape = (n_chs, 2)
-        2 dimensional coordinates for each sensor for a topomap plot.
-    """
-    if len(chs) == 0:
-        raise ValueError("Need more than 0 channels.")
-
-    if layout is not None:
-        pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
-        pos = np.asarray(pos)
-    else:
-        pos = _auto_topomap_coords(chs)
-
-    return pos
-
-
-def _cart_to_sph(x, y, z):
-    """Aux function"""
-    hypotxy = np.hypot(x, y)
-    r = np.hypot(hypotxy, z)
-    elev = np.arctan2(z, hypotxy)
-    az = np.arctan2(y, x)
-    return az, elev, r
-
-
-def _pol_to_cart(th, r):
-    """Aux function"""
-    x = r * np.cos(th)
-    y = r * np.sin(th)
-    return x, y
-
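# The two helpers above compose into the mapping used by _auto_topomap_coords
# below: Cartesian to (azimuth, elevation), then a polar plot position with
# radius pi/2 - elevation. A sensor on the +x axis lands at (pi/2, 0).
import numpy as np

x, y, z = 1.0, 0.0, 0.0
az = np.arctan2(y, x)
elev = np.arctan2(z, np.hypot(x, y))
px = (np.pi / 2 - elev) * np.cos(az)
py = (np.pi / 2 - elev) * np.sin(az)
print(round(px, 6), round(py, 6))  # 1.570796 0.0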
-
-def _auto_topomap_coords(chs):
-    """Make a 2 dimensional sensor map from sensor positions in an info dict
-
-    Parameters
-    ----------
-    chs : list
-        A list of channels as contained in the info['chs'] entry.
-
-    Returns
-    -------
-    locs : array, shape = (n_sensors, 2)
-        An array of positions of the 2 dimensional map.
-    """
-    locs3d = np.array([ch['loc'][:3] for ch in chs
-                       if ch['kind'] in [FIFF.FIFFV_MEG_CH,
-                                         FIFF.FIFFV_EEG_CH]])
-    if not np.any(locs3d):
-        raise RuntimeError('Cannot compute layout, no positions found')
-    x, y, z = locs3d[:, :3].T
-    az, el, r = _cart_to_sph(x, y, z)
-    locs2d = np.c_[_pol_to_cart(az, np.pi / 2 - el)]
-    return locs2d
-
-
-def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads'):
-    """Find the picks for pairing grad channels
-
-    Parameters
-    ----------
-    info : dict
-        An info dictionary containing channel information.
-    layout : Layout
-        The layout if available.
-    topomap_coords : bool
-        Return the coordinates for a topomap plot along with the picks. If
-        False, only picks are returned.
-    exclude : list of str | str
-        List of channels to exclude. If empty do not exclude any (default).
-        If 'bads', exclude channels in info['bads'].
-
-    Returns
-    -------
-    picks : array of int
-        Picks for the grad channels, ordered in pairs.
-    coords : array, shape = (n_grad_channels, 3)
-        Coordinates for a topomap plot (optional, only returned if
-        topomap_coords == True).
-    """
-    # find all complete pairs of grad channels
-    pairs = defaultdict(list)
-    grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
-    for i in grad_picks:
-        ch = info['chs'][i]
-        name = ch['ch_name']
-        if name.startswith('MEG'):
-            if name.endswith(('2', '3')):
-                key = name[-4:-1]
-                pairs[key].append(ch)
-    pairs = [p for p in pairs.values() if len(p) == 2]
-    if len(pairs) == 0:
-        raise ValueError("No 'grad' channel pairs found.")
-
-    # find the picks corresponding to the grad channels
-    grad_chs = sum(pairs, [])
-    ch_names = info['ch_names']
-    picks = [ch_names.index(ch['ch_name']) for ch in grad_chs]
-
-    if topomap_coords:
-        shape = (len(pairs), 2, -1)
-        coords = (_find_topomap_coords(grad_chs, layout)
-                  .reshape(shape).mean(axis=1))
-        return picks, coords
-    else:
-        return picks
-
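# A sketch of the Vectorview pairing rule above: planar gradiometer names
# differ only in the trailing digit (e.g. 'MEG 2442' / 'MEG 2443'), so
# name[-4:-1] buckets each pair under its shared stem (names hypothetical).
from collections import defaultdict

pairs = defaultdict(list)
for name in ['MEG 2442', 'MEG 2443', 'MEG 0113', 'MEG 0112', 'MEG 2441']:
    if name.startswith('MEG') and name.endswith(('2', '3')):
        pairs[name[-4:-1]].append(name)
print(sorted(p for p in pairs.values() if len(p) == 2))
# [['MEG 0113', 'MEG 0112'], ['MEG 2442', 'MEG 2443']]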
-
-def _pair_grad_sensors_from_ch_names(ch_names):
-    """Find the indexes for pairing grad channels
-
-    Parameters
-    ----------
-    ch_names : list of str
-        A list of channel names.
-
-    Returns
-    -------
-    indexes : list of int
-        Indexes of the grad channels, ordered in pairs.
-    """
-    pairs = defaultdict(list)
-    for i, name in enumerate(ch_names):
-        if name.startswith('MEG'):
-            if name.endswith(('2', '3')):
-                key = name[-4:-1]
-                pairs[key].append(i)
-
-    pairs = [p for p in pairs.values() if len(p) == 2]
-
-    grad_chs = sum(pairs, [])
-    return grad_chs
-
-
-def _merge_grad_data(data):
-    """Merge data from channel pairs using the RMS
-
-    Parameters
-    ----------
-    data : array, shape = (n_channels, n_times)
-        Data for channels, ordered in pairs.
-
-    Returns
-    -------
-    data : array, shape = (n_channels / 2, n_times)
-        The root mean square for each pair.
-    """
-    data = data.reshape((len(data) // 2, 2, -1))
-    data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
-    return data
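# A numeric check of the RMS merge above: each consecutive channel pair
# (a, b) collapses to sqrt((a**2 + b**2) / 2) per time point.
import numpy as np

data = np.array([[3., 0.],    # one pair ...
                 [4., 0.]])   # ... two time points
merged = data.reshape((len(data) // 2, 2, -1))
merged = np.sqrt(np.sum(merged ** 2, axis=1) / 2)
print(merged.round(4))  # [[3.5355 0.]]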
diff --git a/mne/layouts/tests/test_layout.py b/mne/layouts/tests/test_layout.py
deleted file mode 100644
index afb415d..0000000
--- a/mne/layouts/tests/test_layout.py
+++ /dev/null
@@ -1,218 +0,0 @@
-from __future__ import print_function
-# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
-#          Denis Engemann <denis.engemann at gmail.com>
-#          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
-#          Eric Larson <larson.eric.d at gmail.com>
-#
-# License: Simplified BSD
-
-import copy
-import os.path as op
-import warnings
-
-import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_array_equal
-from nose.tools import assert_true, assert_raises
-
-from mne.layouts import (make_eeg_layout, make_grid_layout, read_layout,
-                         find_layout)
-
-from mne import pick_types, pick_info
-from mne.io import Raw
-from mne.io import read_raw_kit
-from mne.utils import _TempDir
-
-warnings.simplefilter('always')
-
-fif_fname = op.join(op.dirname(__file__), '..', '..', 'io',
-                   'tests', 'data', 'test_raw.fif')
-
-lout_path = op.join(op.dirname(__file__), '..', '..', 'io',
-                    'tests', 'data')
-
-bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
-                  'tests', 'data')
-
-fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
-                        'data', 'test_ctf_comp_raw.fif')
-
-fname_kit_157 = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
-                        'tests', 'data', 'test.sqd')
-
-test_info = {'ch_names': ['ICA 001', 'ICA 002', 'EOG 061'],
- 'chs': [{'cal': 1,
-   'ch_name': 'ICA 001',
-   'coil_trans': None,
-   'coil_type': 0,
-   'coord_Frame': 0,
-   'eeg_loc': None,
-   'kind': 502,
-   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
-                   dtype=np.float32),
-   'logno': 1,
-   'range': 1.0,
-   'scanno': 1,
-   'unit': -1,
-   'unit_mul': 0},
-  {'cal': 1,
-   'ch_name': 'ICA 002',
-   'coil_trans': None,
-   'coil_type': 0,
-   'coord_Frame': 0,
-   'eeg_loc': None,
-   'kind': 502,
-   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
-                    dtype=np.float32),
-   'logno': 2,
-   'range': 1.0,
-   'scanno': 2,
-   'unit': -1,
-   'unit_mul': 0},
-  {'cal': 0.002142000012099743,
-   'ch_name': 'EOG 061',
-   'coil_trans': None,
-   'coil_type': 1,
-   'coord_frame': 0,
-   'eeg_loc': None,
-   'kind': 202,
-   'loc': np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
-                    dtype=np.float32),
-   'logno': 61,
-   'range': 1.0,
-   'scanno': 376,
-   'unit': 107,
-   'unit_mul': 0}],
-   'nchan': 3}
-
-tempdir = _TempDir()
-
-
-def test_io_layout_lout():
-    """Test IO with .lout files"""
-    layout = read_layout('Vectorview-all', scale=False)
-    layout.save(op.join(tempdir, 'foobar.lout'))
-    layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
-                              scale=False)
-    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
-    assert_true(layout.names == layout_read.names)
-
-    print(layout)  # test repr
-
-
-def test_io_layout_lay():
-    """Test IO with .lay files"""
-    layout = read_layout('CTF151', scale=False)
-    layout.save(op.join(tempdir, 'foobar.lay'))
-    layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
-                              scale=False)
-    assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
-    assert_true(layout.names == layout_read.names)
-
-
-def test_make_eeg_layout():
-    """ Test creation of EEG layout """
-    tmp_name = 'foo'
-    lout_name = 'test_raw'
-    lout_orig = read_layout(kind=lout_name, path=lout_path)
-    layout = make_eeg_layout(Raw(fif_fname).info)
-    layout.save(op.join(tempdir, tmp_name + '.lout'))
-    lout_new = read_layout(kind=tmp_name, path=tempdir)
-    assert_array_equal(lout_new.kind, tmp_name)
-    assert_array_equal(lout_orig.pos, lout_new.pos)
-    assert_array_equal(lout_orig.names, lout_new.names)
-
-
-def test_make_grid_layout():
-    """ Test creation of grid layout """
-    tmp_name = 'bar'
-    lout_name = 'test_ica'
-    lout_orig = read_layout(kind=lout_name, path=lout_path)
-    layout = make_grid_layout(test_info)
-    layout.save(op.join(tempdir, tmp_name + '.lout'))
-    lout_new = read_layout(kind=tmp_name, path=tempdir)
-    assert_array_equal(lout_new.kind, tmp_name)
-    assert_array_equal(lout_orig.pos, lout_new.pos)
-    assert_array_equal(lout_orig.names, lout_new.names)
-
-
-def test_find_layout():
-    """Test finding layout"""
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        find_layout(chs=test_info['chs'])
-        assert_true(w[0].category == DeprecationWarning)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        find_layout(test_info['chs'])
-        assert_true(w[0].category == DeprecationWarning)
-    assert_raises(ValueError, find_layout, dict())
-    assert_raises(ValueError, find_layout, test_info, ch_type='meep')
-
-    sample_info = Raw(fif_fname).info
-    grads = pick_types(sample_info, meg='grad')
-    sample_info2 = pick_info(sample_info, grads)
-
-    mags = pick_types(sample_info, meg='mag')
-    sample_info3 = pick_info(sample_info, mags)
-
-    # mock new convention
-    sample_info4 = copy.deepcopy(sample_info)
-    for ii, name in enumerate(sample_info4['ch_names']):
-        new = name.replace(' ', '')
-        sample_info4['ch_names'][ii] = new
-        sample_info4['chs'][ii]['ch_name'] = new
-
-    mags = pick_types(sample_info, meg=False, eeg=True)
-    sample_info5 = pick_info(sample_info, mags)
-
-    lout = find_layout(sample_info, ch_type=None)
-    assert_true(lout.kind == 'Vectorview-all')
-    assert_true(all(' ' in k for k in lout.names))
-
-    lout = find_layout(sample_info2, ch_type='meg')
-    assert_true(lout.kind == 'Vectorview-all')
-
-    # test new vector-view
-    lout = find_layout(sample_info4, ch_type=None)
-    assert_true(lout.kind == 'Vectorview-all')
-    assert_true(all(not ' ' in k for k in lout.names))
-
-    lout = find_layout(sample_info, ch_type='grad')
-    assert_true(lout.kind == 'Vectorview-grad')
-    lout = find_layout(sample_info2)
-    assert_true(lout.kind == 'Vectorview-grad')
-    lout = find_layout(sample_info2, ch_type='grad')
-    assert_true(lout.kind == 'Vectorview-grad')
-    lout = find_layout(sample_info2, ch_type='meg')
-    assert_true(lout.kind == 'Vectorview-all')
-
-
-    lout = find_layout(sample_info, ch_type='mag')
-    assert_true(lout.kind == 'Vectorview-mag')
-    lout = find_layout(sample_info3)
-    assert_true(lout.kind == 'Vectorview-mag')
-    lout = find_layout(sample_info3, ch_type='mag')
-    assert_true(lout.kind == 'Vectorview-mag')
-    lout = find_layout(sample_info3, ch_type='meg')
-    assert_true(lout.kind == 'Vectorview-all')
-    #
-    lout = find_layout(sample_info, ch_type='eeg')
-    assert_true(lout.kind == 'EEG')
-    lout = find_layout(sample_info5)
-    assert_true(lout.kind == 'EEG')
-    lout = find_layout(sample_info5, ch_type='eeg')
-    assert_true(lout.kind == 'EEG')
-    # no common layout, 'meg' option not supported
-
-    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
-    lout = find_layout(Raw(fname_bti_raw).info)
-    assert_true(lout.kind == 'magnesWH3600')
-
-    lout = find_layout(Raw(fname_ctf_raw).info)
-    assert_true(lout.kind == 'CTF-275')
-
-    lout = find_layout(read_raw_kit(fname_kit_157).info)
-    assert_true(lout.kind == 'KIT-157')
-
-    sample_info5['dig'] = []
-    assert_raises(RuntimeError, find_layout, sample_info5)
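
The behaviour pinned down by the deleted test_find_layout can be condensed
into a short sketch. The import location is an assumption: after this
reorganisation the layout helpers presumably live under mne.channels, and
the data path below is illustrative only:

    import os.path as op
    from mne import pick_info, pick_types
    from mne.io import Raw
    from mne.channels import find_layout  # assumed post-reorg location

    # Illustrative path to the test file used by the deleted module
    fif_fname = op.join('mne', 'io', 'tests', 'data', 'test_raw.fif')

    info = Raw(fif_fname).info
    grad_info = pick_info(info, pick_types(info, meg='grad'))
    assert find_layout(info).kind == 'Vectorview-all'
    assert find_layout(grad_info).kind == 'Vectorview-grad'
    assert find_layout(info, ch_type='eeg').kind == 'EEG'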
diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py
index 07174dc..b48f805 100644
--- a/mne/minimum_norm/__init__.py
+++ b/mne/minimum_norm/__init__.py
@@ -3,7 +3,8 @@
 from .inverse import (InverseOperator, read_inverse_operator, apply_inverse,
                       apply_inverse_raw, make_inverse_operator,
                       apply_inverse_epochs, write_inverse_operator,
-                      compute_rank_inverse)
+                      compute_rank_inverse, prepare_inverse_operator,
+                      estimate_snr)
 from .psf_ctf import point_spread_function, cross_talk_function
 from .time_frequency import (source_band_induced_power, source_induced_power,
                              compute_source_psd, compute_source_psd_epochs)
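
The net effect of this hunk is that two more helpers join the public
mne.minimum_norm namespace; after the change this import is expected to work
(a quick sanity check, nothing else assumed):

    from mne.minimum_norm import prepare_inverse_operator, estimate_snr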
diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py
index cd572c9..eca2a24 100644
--- a/mne/minimum_norm/inverse.py
+++ b/mne/minimum_norm/inverse.py
@@ -1,5 +1,6 @@
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Matti Hamalainen <msh at nmr.mgh.harvard.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -15,20 +16,21 @@ from ..io.tag import find_tag
 from ..io.matrix import (_read_named_matrix, _transpose_named_matrix,
                          write_named_matrix)
 from ..io.proj import _read_proj, make_projector, _write_proj
+from ..io.proj import _has_eeg_average_ref_proj
 from ..io.tree import dir_tree_find
 from ..io.write import (write_int, write_float_matrix, start_file,
                         start_block, end_block, end_file, write_float,
                         write_coord_trans, write_string)
 
 from ..io.pick import channel_type, pick_info, pick_types
-from ..cov import prepare_noise_cov, _read_cov, _write_cov
-from ..forward import (compute_depth_prior, read_forward_meas_info,
+from ..cov import prepare_noise_cov, _read_cov, _write_cov, Covariance
+from ..forward import (compute_depth_prior, _read_forward_meas_info,
                        write_forward_meas_info, is_fixed_orient,
-                       compute_orient_prior, _to_fixed_ori)
-from ..source_space import (read_source_spaces_from_tree,
+                       compute_orient_prior, convert_forward_solution)
+from ..source_space import (_read_source_spaces_from_tree,
                             find_source_space_hemi, _get_vertno,
                             _write_source_spaces_to_fid, label_src_vertno_sel)
-from ..transforms import invert_transform, transform_surface_to
+from ..transforms import _ensure_trans, transform_surface_to
 from ..source_estimate import _make_stc
 from ..utils import check_fname, logger, verbose
 from functools import reduce
@@ -74,7 +76,7 @@ def _pick_channels_inverse_operator(ch_names, inv):
     an inverse operator
     """
     sel = []
-    for name in inv['noise_cov']['names']:
+    for name in inv['noise_cov'].ch_names:
         if name in ch_names:
             sel.append(ch_names.index(name))
         else:
@@ -101,6 +103,10 @@ def read_inverse_operator(fname, verbose=None):
     -------
     inv : instance of InverseOperator
         The inverse operator.
+
+    See Also
+    --------
+    write_inverse_operator, make_inverse_operator
     """
     check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
 
@@ -109,217 +115,196 @@ def read_inverse_operator(fname, verbose=None):
     #
     logger.info('Reading inverse operator decomposition from %s...'
                 % fname)
-    fid, tree, _ = fiff_open(fname, preload=True)
-    #
-    #   Find all inverse operators
-    #
-    invs = dir_tree_find(tree, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
-    if invs is None or len(invs) < 1:
-        fid.close()
-        raise Exception('No inverse solutions in %s' % fname)
+    f, tree, _ = fiff_open(fname, preload=True)
+    with f as fid:
+        #
+        #   Find all inverse operators
+        #
+        invs = dir_tree_find(tree, FIFF.FIFFB_MNE_INVERSE_SOLUTION)
+        if invs is None or len(invs) < 1:
+            raise Exception('No inverse solutions in %s' % fname)
 
-    invs = invs[0]
-    #
-    #   Parent MRI data
-    #
-    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
-    if len(parent_mri) == 0:
-        fid.close()
-        raise Exception('No parent MRI information in %s' % fname)
-    parent_mri = parent_mri[0]  # take only first one
+        invs = invs[0]
+        #
+        #   Parent MRI data
+        #
+        parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+        if len(parent_mri) == 0:
+            raise Exception('No parent MRI information in %s' % fname)
+        parent_mri = parent_mri[0]  # take only first one
 
-    logger.info('    Reading inverse operator info...')
-    #
-    #   Methods and source orientations
-    #
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INCLUDED_METHODS)
-    if tag is None:
-        fid.close()
-        raise Exception('Modalities not found')
+        logger.info('    Reading inverse operator info...')
+        #
+        #   Methods and source orientations
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INCLUDED_METHODS)
+        if tag is None:
+            raise Exception('Modalities not found')
 
-    inv = dict()
-    inv['methods'] = int(tag.data)
+        inv = dict()
+        inv['methods'] = int(tag.data)
 
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
-    if tag is None:
-        fid.close()
-        raise Exception('Source orientation constraints not found')
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_ORIENTATION)
+        if tag is None:
+            raise Exception('Source orientation constraints not found')
 
-    inv['source_ori'] = int(tag.data)
+        inv['source_ori'] = int(tag.data)
 
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
-    if tag is None:
-        fid.close()
-        raise Exception('Number of sources not found')
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
+        if tag is None:
+            raise Exception('Number of sources not found')
 
-    inv['nsource'] = int(tag.data)
-    inv['nchan'] = 0
-    #
-    #   Coordinate frame
-    #
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_COORD_FRAME)
-    if tag is None:
-        fid.close()
-        raise Exception('Coordinate frame tag not found')
+        inv['nsource'] = int(tag.data)
+        inv['nchan'] = 0
+        #
+        #   Coordinate frame
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_COORD_FRAME)
+        if tag is None:
+            raise Exception('Coordinate frame tag not found')
 
-    inv['coord_frame'] = tag.data
+        inv['coord_frame'] = tag.data
 
-    #
-    #   Units
-    #
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT)
-    if tag is not None:
-        if tag.data == FIFF.FIFF_UNIT_AM:
-            inv['units'] = 'Am'
-        elif tag.data == FIFF.FIFF_UNIT_AM_M2:
-            inv['units'] = 'Am/m^2'
-        elif tag.data == FIFF.FIFF_UNIT_AM_M3:
-            inv['units'] = 'Am/m^3'
-        else:
-            inv['units'] = None
-    else:
-        inv['units'] = None
-    #
-    #   The actual source orientation vectors
-    #
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS)
-    if tag is None:
-        fid.close()
-        raise Exception('Source orientation information not found')
+        #
+        #   Units
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT)
+        unit_dict = {FIFF.FIFF_UNIT_AM: 'Am',
+                     FIFF.FIFF_UNIT_AM_M2: 'Am/m^2',
+                     FIFF.FIFF_UNIT_AM_M3: 'Am/m^3'}
+        inv['units'] = unit_dict.get(int(getattr(tag, 'data', -1)), None)
 
-    inv['source_nn'] = tag.data
-    logger.info('    [done]')
-    #
-    #   The SVD decomposition...
-    #
-    logger.info('    Reading inverse operator decomposition...')
-    tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SING)
-    if tag is None:
-        fid.close()
-        raise Exception('Singular values not found')
-
-    inv['sing'] = tag.data
-    inv['nchan'] = len(inv['sing'])
-    #
-    #   The eigenleads and eigenfields
-    #
-    inv['eigen_leads_weighted'] = False
-    eigen_leads = _read_named_matrix(fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS)
-    if eigen_leads is None:
-        inv['eigen_leads_weighted'] = True
-        eigen_leads = _read_named_matrix(fid, invs,
-                                         FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED)
-    if eigen_leads is None:
-        raise ValueError('Eigen leads not found in inverse operator.')
-    #
-    #   Having the eigenleads as columns is better for the inverse calculations
-    #
-    inv['eigen_leads'] = _transpose_named_matrix(eigen_leads, copy=False)
-    inv['eigen_fields'] = _read_named_matrix(fid, invs,
-                                             FIFF.FIFF_MNE_INVERSE_FIELDS)
-    logger.info('    [done]')
-    #
-    #   Read the covariance matrices
-    #
-    inv['noise_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV)
-    logger.info('    Noise covariance matrix read.')
+        #
+        #   The actual source orientation vectors
+        #
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS)
+        if tag is None:
+            raise Exception('Source orientation information not found')
 
-    inv['source_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
-    logger.info('    Source covariance matrix read.')
-    #
-    #   Read the various priors
-    #
-    inv['orient_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
-    if inv['orient_prior'] is not None:
-        logger.info('    Orientation priors read.')
+        inv['source_nn'] = tag.data
+        logger.info('    [done]')
+        #
+        #   The SVD decomposition...
+        #
+        logger.info('    Reading inverse operator decomposition...')
+        tag = find_tag(fid, invs, FIFF.FIFF_MNE_INVERSE_SING)
+        if tag is None:
+            raise Exception('Singular values not found')
 
-    inv['depth_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
-    if inv['depth_prior'] is not None:
-        logger.info('    Depth priors read.')
+        inv['sing'] = tag.data
+        inv['nchan'] = len(inv['sing'])
+        #
+        #   The eigenleads and eigenfields
+        #
+        inv['eigen_leads_weighted'] = False
+        eigen_leads = _read_named_matrix(
+            fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS)
+        if eigen_leads is None:
+            inv['eigen_leads_weighted'] = True
+            eigen_leads = _read_named_matrix(
+                fid, invs, FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED)
+        if eigen_leads is None:
+            raise ValueError('Eigen leads not found in inverse operator.')
+        #
+        #   Having the eigenleads as cols is better for the inverse calcs
+        #
+        inv['eigen_leads'] = _transpose_named_matrix(eigen_leads, copy=False)
+        inv['eigen_fields'] = _read_named_matrix(fid, invs,
+                                                 FIFF.FIFF_MNE_INVERSE_FIELDS)
+        logger.info('    [done]')
+        #
+        #   Read the covariance matrices
+        #
+        inv['noise_cov'] = Covariance(
+            **_read_cov(fid, invs, FIFF.FIFFV_MNE_NOISE_COV, limited=True))
+        logger.info('    Noise covariance matrix read.')
 
-    inv['fmri_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
-    if inv['fmri_prior'] is not None:
-        logger.info('    fMRI priors read.')
+        inv['source_cov'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_SOURCE_COV)
+        logger.info('    Source covariance matrix read.')
+        #
+        #   Read the various priors
+        #
+        inv['orient_prior'] = _read_cov(fid, invs,
+                                        FIFF.FIFFV_MNE_ORIENT_PRIOR_COV)
+        if inv['orient_prior'] is not None:
+            logger.info('    Orientation priors read.')
 
-    #
-    #   Read the source spaces
-    #
-    inv['src'] = read_source_spaces_from_tree(fid, tree, add_geom=False)
+        inv['depth_prior'] = _read_cov(fid, invs,
+                                       FIFF.FIFFV_MNE_DEPTH_PRIOR_COV)
+        if inv['depth_prior'] is not None:
+            logger.info('    Depth priors read.')
 
-    for s in inv['src']:
-        s['id'] = find_source_space_hemi(s)
+        inv['fmri_prior'] = _read_cov(fid, invs, FIFF.FIFFV_MNE_FMRI_PRIOR_COV)
+        if inv['fmri_prior'] is not None:
+            logger.info('    fMRI priors read.')
 
-    #
-    #   Get the MRI <-> head coordinate transformation
-    #
-    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
-    if tag is None:
-        fid.close()
-        raise Exception('MRI/head coordinate transformation not found')
-    else:
-        mri_head_t = tag.data
-        if mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or \
-                        mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD:
-            mri_head_t = invert_transform(mri_head_t)
-            if mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or \
-                        mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD:
-                fid.close()
-                raise Exception('MRI/head coordinate transformation '
-                                'not found')
-
-    inv['mri_head_t'] = mri_head_t
+        #
+        #   Read the source spaces
+        #
+        inv['src'] = _read_source_spaces_from_tree(fid, tree,
+                                                   patch_stats=False)
 
-    #
-    # get parent MEG info
-    #
-    inv['info'] = read_forward_meas_info(tree, fid)
+        for s in inv['src']:
+            s['id'] = find_source_space_hemi(s)
 
-    #
-    #   Transform the source spaces to the correct coordinate frame
-    #   if necessary
-    #
-    if inv['coord_frame'] != FIFF.FIFFV_COORD_MRI and \
-            inv['coord_frame'] != FIFF.FIFFV_COORD_HEAD:
-        fid.close()
-        raise Exception('Only inverse solutions computed in MRI or '
-                        'head coordinates are acceptable')
+        #
+        #   Get the MRI <-> head coordinate transformation
+        #
+        tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+        if tag is None:
+            raise Exception('MRI/head coordinate transformation not found')
+        mri_head_t = _ensure_trans(tag.data, 'mri', 'head')
 
-    #
-    #  Number of averages is initially one
-    #
-    inv['nave'] = 1
-    #
-    #  We also need the SSP operator
-    #
-    inv['projs'] = _read_proj(fid, tree)
+        inv['mri_head_t'] = mri_head_t
 
-    #
-    #  Some empty fields to be filled in later
-    #
-    inv['proj'] = []       # This is the projector to apply to the data
-    inv['whitener'] = []   # This whitens the data
-    inv['reginv'] = []     # This is the diagonal matrix implementing
-                           # regularization and the inverse
-    inv['noisenorm'] = []  # These are the noise-normalization factors
-    #
-    nuse = 0
-    for k in range(len(inv['src'])):
-        try:
-            inv['src'][k] = transform_surface_to(inv['src'][k],
-                                                 inv['coord_frame'],
-                                                 mri_head_t)
-        except Exception as inst:
-            fid.close()
-            raise Exception('Could not transform source space (%s)' % inst)
-
-        nuse += inv['src'][k]['nuse']
-
-    logger.info('    Source spaces transformed to the inverse solution '
-                'coordinate frame')
-    #
-    #   Done!
-    #
-    fid.close()
+        #
+        # get parent MEG info
+        #
+        inv['info'] = _read_forward_meas_info(tree, fid)
+
+        #
+        #   Transform the source spaces to the correct coordinate frame
+        #   if necessary
+        #
+        if inv['coord_frame'] not in (FIFF.FIFFV_COORD_MRI,
+                                      FIFF.FIFFV_COORD_HEAD):
+            raise Exception('Only inverse solutions computed in MRI or '
+                            'head coordinates are acceptable')
+
+        #
+        #  Number of averages is initially one
+        #
+        inv['nave'] = 1
+        #
+        #  We also need the SSP operator
+        #
+        inv['projs'] = _read_proj(fid, tree)
+
+        #
+        #  Some empty fields to be filled in later
+        #
+        inv['proj'] = []       # This is the projector to apply to the data
+        inv['whitener'] = []   # This whitens the data
+        # The diagonal matrix implementing regularization and the inverse
+        inv['reginv'] = []
+        inv['noisenorm'] = []  # These are the noise-normalization factors
+        #
+        nuse = 0
+        for k in range(len(inv['src'])):
+            try:
+                inv['src'][k] = transform_surface_to(inv['src'][k],
+                                                     inv['coord_frame'],
+                                                     mri_head_t)
+            except Exception as inst:
+                raise Exception('Could not transform source space (%s)' % inst)
+
+            nuse += inv['src'][k]['nuse']
+
+        logger.info('    Source spaces transformed to the inverse solution '
+                    'coordinate frame')
+        #
+        #   Done!
+        #
 
     return InverseOperator(inv)
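
The rewrite above replaces the scattered fid.close() calls with a single
with block, so the descriptor is released on every error path. A generic,
self-contained sketch of why that pattern is safer (plain Python, not MNE
code):

    import io

    # __exit__ runs even when the body raises, so the resource is released
    # without an explicit close() before each raise statement.
    f = io.StringIO('dummy')
    try:
        with f as fid:
            raise Exception('Modalities not found')  # mimics a parse error
    except Exception:
        pass
    print(f.closed)  # True: the with block closed it despite the exception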
 
@@ -336,6 +321,10 @@ def write_inverse_operator(fname, inv, verbose=None):
         The inverse operator.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_inverse_operator
     """
     check_fname(fname, 'inverse operator', ('-inv.fif', '-inv.fif.gz'))
 
@@ -379,16 +368,11 @@ def write_inverse_operator(fname, inv, verbose=None):
     write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, inv['methods'])
     write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, inv['coord_frame'])
 
-    if 'units' in inv:
-        if inv['units'] == 'Am':
-            write_int(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT,
-                      FIFF.FIFF_UNIT_AM)
-        elif inv['units'] == 'Am/m^2':
-            write_int(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT,
-                      FIFF.FIFF_UNIT_AM_M2)
-        elif inv['units'] == 'Am/m^3':
-            write_int(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT,
-                      FIFF.FIFF_UNIT_AM_M3)
+    udict = {'Am': FIFF.FIFF_UNIT_AM,
+             'Am/m^2': FIFF.FIFF_UNIT_AM_M2,
+             'Am/m^3': FIFF.FIFF_UNIT_AM_M3}
+    if 'units' in inv and inv['units'] is not None:
+        write_int(fid, FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT, udict[inv['units']])
 
     write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, inv['source_ori'])
     write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, inv['nsource'])
@@ -482,7 +466,7 @@ def _check_ch_names(inv, info):
 
     inv_ch_names = inv['eigen_fields']['col_names']
 
-    if inv['noise_cov']['names'] != inv_ch_names:
+    if inv['noise_cov'].ch_names != inv_ch_names:
         raise ValueError('Channels in inverse operator eigen fields do not '
                          'match noise covariance channels.')
     data_ch_names = info['ch_names']
@@ -717,46 +701,41 @@ def _check_method(method):
     return method
 
 
-def _check_ori(pick_ori, pick_normal):
-    if pick_normal is not None:
-        warnings.warn('DEPRECATION: The pick_normal parameter has been '
-                      'changed to pick_ori. Please update your code.')
-        pick_ori = pick_normal
-    if pick_ori is True:
-        warnings.warn('DEPRECATION: The pick_ori parameter should now be None '
-                      'or "normal".')
-        pick_ori = "normal"
-    elif pick_ori is False:
-        warnings.warn('DEPRECATION: The pick_ori parameter should now be None '
-                      'or "normal".')
-        pick_ori = None
-
-    if pick_ori not in [None, "normal"]:
-        raise ValueError('The pick_ori parameter should now be None or '
-                         '"normal".')
+def _check_ori(pick_ori):
+    if pick_ori is not None and pick_ori != 'normal':
+        raise RuntimeError('pick_ori must be None or "normal", not %s'
+                           % pick_ori)
     return pick_ori
 
 
+def _check_reference(inst):
+    """Aux funcion"""
+    if "eeg" in inst and not _has_eeg_average_ref_proj(inst.info['projs']):
+        raise ValueError('EEG average reference is mandatory for inverse '
+                         'modeling.')
+    if inst.info['custom_ref_applied']:
+        raise ValueError('Custom EEG reference is not allowed for inverse '
+                         'modeling.')
+
+
 def _subject_from_inverse(inverse_operator):
     """Get subject id from inverse operator"""
     return inverse_operator['src'][0].get('subject_his_id', None)
 
 
 @verbose
-def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
-                  pick_ori=None, verbose=None, pick_normal=None):
+def apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
+                  method="dSPM", pick_ori=None,
+                  prepared=False, label=None, verbose=None):
     """Apply inverse operator to evoked data
 
-    Computes a L2-norm inverse solution
-    Actual code using these principles might be different because
-    the inverse operator is often reused across data sets.
-
     Parameters
     ----------
     evoked : Evoked object
         Evoked data.
-    inverse_operator: dict
-        Inverse operator read with mne.read_inverse_operator.
+    inverse_operator : instance of InverseOperator
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
     lambda2 : float
         The regularization parameter.
     method : "MNE" | "dSPM" | "sLORETA"
@@ -765,6 +744,11 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
         If "normal", rather than pooling the orientations by taking the norm,
         only the radial component is kept. This is only implemented
         when working with loose orientations.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
+    label : Label | None
+        Restricts the source estimates to a given label. If None,
+        source estimates will be computed for the entire source space.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -772,9 +756,15 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
     -------
     stc : SourceEstimate | VolSourceEstimate
         The source estimates.
+
+    See Also
+    --------
+    apply_inverse_raw : Apply inverse operator to raw object
+    apply_inverse_epochs : Apply inverse operator to epochs object
     """
+    _check_reference(evoked)
     method = _check_method(method)
-    pick_ori = _check_ori(pick_ori, pick_normal)
+    pick_ori = _check_ori(pick_ori)
     #
     #   Set up the inverse according to the parameters
     #
@@ -782,18 +772,21 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
 
     _check_ch_names(inverse_operator, evoked.info)
 
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
     #
     #   Pick the correct channels from the data
     #
     sel = _pick_channels_inverse_operator(evoked.ch_names, inv)
     logger.info('Picked %d channels from the data' % len(sel))
     logger.info('Computing inverse...')
-    K, noise_norm, _ = _assemble_kernel(inv, None, method, pick_ori)
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
     sol = np.dot(K, evoked.data[sel])  # apply imaging kernel
 
-    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori is None)
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
 
     if is_free_ori:
         logger.info('combining the current components...')
@@ -805,7 +798,6 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
 
     tstep = 1.0 / evoked.info['sfreq']
     tmin = float(evoked.times[0])
-    vertno = _get_vertno(inv['src'])
     subject = _subject_from_inverse(inverse_operator)
 
     stc = _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
@@ -818,21 +810,17 @@ def apply_inverse(evoked, inverse_operator, lambda2, method="dSPM",
 @verbose
 def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
                       label=None, start=None, stop=None, nave=1,
-                      time_func=None, pick_ori=None,
-                      buffer_size=None, verbose=None,
-                      pick_normal=None):
+                      time_func=None, pick_ori=None, buffer_size=None,
+                      prepared=False, verbose=None):
     """Apply inverse operator to Raw data
 
-    Computes a L2-norm inverse solution
-    Actual code using these principles might be different because
-    the inverse operator is often reused across data sets.
-
     Parameters
     ----------
     raw : Raw object
         Raw data.
     inverse_operator : dict
-        Inverse operator read with mne.read_inverse_operator.
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
     lambda2 : float
         The regularization parameter.
     method : "MNE" | "dSPM" | "sLORETA"
@@ -861,6 +849,8 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
         buffer_size << data length).
         Note that this setting has no effect for fixed-orientation inverse
         operators.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -868,16 +858,25 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
     -------
     stc : SourceEstimate | VolSourceEstimate
         The source estimates.
+
+    See Also
+    --------
+    apply_inverse_epochs : Apply inverse operator to epochs object
+    apply_inverse : Apply inverse operator to evoked object
     """
+    _check_reference(raw)
     method = _check_method(method)
-    pick_ori = _check_ori(pick_ori, pick_normal)
+    pick_ori = _check_ori(pick_ori)
 
     _check_ch_names(inverse_operator, raw.info)
 
     #
     #   Set up the inverse according to the parameters
     #
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
     #
     #   Pick the correct channels from the data
     #
@@ -892,8 +891,8 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
 
     K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
 
-    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori is None)
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
 
     if buffer_size is not None and is_free_ori:
         # Process the data in segments to conserve memory
@@ -933,17 +932,20 @@ def apply_inverse_raw(raw, inverse_operator, lambda2, method="dSPM",
 
 def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method='dSPM',
                               label=None, nave=1, pick_ori=None,
-                              verbose=None, pick_normal=None):
+                              prepared=False, verbose=None):
     """ see apply_inverse_epochs """
     method = _check_method(method)
-    pick_ori = _check_ori(pick_ori, pick_normal)
+    pick_ori = _check_ori(pick_ori)
 
     _check_ch_names(inverse_operator, epochs.info)
 
     #
     #   Set up the inverse according to the parameters
     #
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
     #
     #   Pick the correct channels from the data
     #
@@ -955,8 +957,8 @@ def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method='dSPM',
     tstep = 1.0 / epochs.info['sfreq']
     tmin = epochs.times[0]
 
-    is_free_ori = (inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-                   and pick_ori is None)
+    is_free_ori = (inverse_operator['source_ori'] ==
+                   FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None)
 
     if not is_free_ori and noise_norm is not None:
         # premultiply kernel with noise normalization
@@ -992,19 +994,17 @@ def _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2, method='dSPM',
 @verbose
 def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
                          label=None, nave=1, pick_ori=None,
-                         return_generator=False, verbose=None,
-                         pick_normal=None):
+                         return_generator=False,
+                         prepared=False, verbose=None):
     """Apply inverse operator to Epochs
 
-    Computes a L2-norm inverse solution on each epochs and returns
-    single trial source estimates.
-
     Parameters
     ----------
     epochs : Epochs object
         Single trial epochs.
     inverse_operator : dict
-        Inverse operator read with mne.read_inverse_operator.
+        Inverse operator returned from `mne.read_inverse_operator`,
+        `prepare_inverse_operator` or `make_inverse_operator`.
     lambda2 : float
         The regularization parameter.
     method : "MNE" | "dSPM" | "sLORETA"
@@ -1022,6 +1022,8 @@ def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
     return_generator : bool
         Return a generator object instead of a list. This allows iterating
         over the stcs without having to keep them all in memory.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1029,11 +1031,17 @@ def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
     -------
     stc : list of SourceEstimate or VolSourceEstimate
         The source estimates for all epochs.
+
+    See Also
+    --------
+    apply_inverse_raw : Apply inverse operator to raw object
+    apply_inverse : Apply inverse operator to evoked object
     """
+    _check_reference(epochs)
     stcs = _apply_inverse_epochs_gen(epochs, inverse_operator, lambda2,
                                      method=method, label=label, nave=nave,
                                      pick_ori=pick_ori, verbose=verbose,
-                                     pick_normal=pick_normal)
+                                     prepared=prepared)
 
     if not return_generator:
         # return a list
@@ -1042,6 +1050,7 @@ def apply_inverse_epochs(epochs, inverse_operator, lambda2, method="dSPM",
     return stcs
 
 
+'''
 def _xyz2lf(Lf_xyz, normals):
     """Reorient leadfield to one component matching the normal to the cortex
 
@@ -1084,24 +1093,27 @@ def _xyz2lf(Lf_xyz, normals):
 
     Lf_cortex = Lf_cortex.reshape(n_sensors, n_dipoles)
     return Lf_cortex
+'''
 
 
 ###############################################################################
 # Assemble the inverse operator
 
 @verbose
-def _prepare_forward(forward, info, noise_cov, pca=False, verbose=None):
+def _prepare_forward(forward, info, noise_cov, pca=False, rank=None,
+                     verbose=None):
     """Util function to prepare forward solution for inverse solvers
     """
-    fwd_ch_names = [c['ch_name'] for c in forward['info']['chs']]
+    # fwd['sol']['row_names'] can differ in order from fwd['info']['chs']
+    fwd_sol_ch_names = forward['sol']['row_names']
     ch_names = [c['ch_name'] for c in info['chs']
-                if (c['ch_name'] not in info['bads']
-                    and c['ch_name'] not in noise_cov['bads'])
-                and (c['ch_name'] in fwd_ch_names
-                     and c['ch_name'] in noise_cov.ch_names)]
+                if ((c['ch_name'] not in info['bads'] and
+                     c['ch_name'] not in noise_cov['bads']) and
+                    (c['ch_name'] in fwd_sol_ch_names and
+                     c['ch_name'] in noise_cov.ch_names))]
 
     if not len(info['bads']) == len(noise_cov['bads']) or \
-            not all([b in noise_cov['bads'] for b in info['bads']]):
+            not all(b in noise_cov['bads'] for b in info['bads']):
         logger.info('info["bads"] and noise_cov["bads"] do not match, '
                     'excluding bad channels from both')
 
@@ -1111,7 +1123,7 @@ def _prepare_forward(forward, info, noise_cov, pca=False, verbose=None):
     #
     #   Handle noise cov
     #
-    noise_cov = prepare_noise_cov(noise_cov, info, ch_names)
+    noise_cov = prepare_noise_cov(noise_cov, info, ch_names, rank)
 
     #   Omit the zeroes due to projection
     eig = noise_cov['eig']
@@ -1130,8 +1142,12 @@ def _prepare_forward(forward, info, noise_cov, pca=False, verbose=None):
 
     gain = forward['sol']['data']
 
-    fwd_idx = [fwd_ch_names.index(name) for name in ch_names]
+    # This actually reorders the gain matrix to conform to the info ch order
+    fwd_idx = [fwd_sol_ch_names.index(name) for name in ch_names]
     gain = gain[fwd_idx]
+    # Any function calling this helper will be using the returned fwd_info
+    # dict, so fwd['sol']['row_names'] becomes obsolete and is NOT re-ordered
+
     info_idx = [info['ch_names'].index(name) for name in ch_names]
     fwd_info = pick_info(info, info_idx)
 
@@ -1142,7 +1158,8 @@ def _prepare_forward(forward, info, noise_cov, pca=False, verbose=None):
 
 @verbose
 def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
-                          fixed=False, limit_depth_chs=True, verbose=None):
+                          fixed=False, limit_depth_chs=True, rank=None,
+                          verbose=None):
     """Assemble inverse operator
 
     Parameters
@@ -1152,7 +1169,7 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
         Bad channels in info['bads'] are not used.
     forward : dict
         Forward operator.
-    noise_cov : Covariance
+    noise_cov : instance of Covariance
         The noise covariance matrix.
     loose : None | float in [0, 1]
         Value that weights the source variances of the dipole components
@@ -1167,6 +1184,11 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
         If True, use only grad channels in depth weighting (equivalent to MNE
         C code). If grad channels aren't present, only mag channels will be
         used (if no mag, then eeg). If False, use all channels.
+    rank : None | int | dict
+        Specified rank of the noise covariance matrix. If None, the rank is
+        detected automatically. If int, the rank is specified for the MEG
+        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
+        to specify the rank for each modality.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1209,7 +1231,7 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
     has patch statistics computed, these are used to improve the depth
     weighting. Thus slightly different results are to be expected with
     and without this information.
-    """
+    """  # noqa
     is_fixed_ori = is_fixed_orient(forward)
 
     if fixed and loose is not None:
@@ -1237,11 +1259,13 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
     if depth is not None:
         if not (0 < depth <= 1):
             raise ValueError('depth should be a scalar between 0 and 1')
-        if is_fixed_ori or not forward['surf_ori']:
+        if is_fixed_ori:
             raise ValueError('You need a free-orientation, surface-oriented '
                              'forward solution to do depth weighting even '
                              'when calculating a fixed-orientation inverse.')
-
+        if not forward['surf_ori']:
+            forward = convert_forward_solution(forward, surf_ori=True)
+        assert forward['surf_ori']
     if loose is not None:
         if not (0 <= loose <= 1):
             raise ValueError('loose value should be smaller than 1 and bigger '
@@ -1260,7 +1284,8 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
     #
 
     gain_info, gain, noise_cov, whitener, n_nzero = \
-        _prepare_forward(forward, info, noise_cov)
+        _prepare_forward(forward, info, noise_cov, rank=rank)
+    forward['info']._check_consistency()
 
     #
     # 5. Compose the depth-weighting matrix
@@ -1283,8 +1308,8 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
         if not is_fixed_ori:
             # Convert to the fixed orientation forward solution now
             depth_prior = depth_prior[2::3]
-            forward = deepcopy(forward)
-            _to_fixed_ori(forward)
+            forward = convert_forward_solution(
+                forward, surf_ori=forward['surf_ori'], force_fixed=True)
             is_fixed_ori = is_fixed_orient(forward)
             gain_info, gain, noise_cov, whitener, n_nzero = \
                 _prepare_forward(forward, info, noise_cov, verbose=False)
@@ -1398,7 +1423,9 @@ def make_inverse_operator(info, forward, noise_cov, loose=0.2, depth=0.8,
                   source_nn=forward['source_nn'].copy(),
                   src=deepcopy(forward['src']), fmri_prior=None)
     inv_info = deepcopy(forward['info'])
-    inv_info['bads'] = deepcopy(info['bads'])
+    inv_info['bads'] = [bad for bad in info['bads']
+                        if bad in inv_info['ch_names']]
+    inv_info._check_consistency()
     inv_op['units'] = 'Am'
     inv_op['info'] = inv_info
 
@@ -1426,3 +1453,124 @@ def compute_rank_inverse(inv):
         ncomp = make_projector(inv['projs'], inv['noise_cov']['names'])[1]
         rank = inv['noise_cov']['dim'] - ncomp
     return rank
+
+
+# #############################################################################
+# SNR Estimation
+
+@verbose
+def estimate_snr(evoked, inv, verbose=None):
+    """Estimate the SNR as a function of time for evoked data
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked instance.
+    inv : instance of InverseOperator
+        The inverse operator.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    snr : ndarray, shape (n_times,)
+        The SNR estimated from the whitened data.
+    snr_est : ndarray, shape (n_times,)
+        The SNR estimated using the mismatch between the unregularized
+        solution and the regularized solution.
+
+    Notes
+    -----
+    ``snr_est`` is estimated by using different amounts of inverse
+    regularization and checking the mismatch between predicted and
+    measured whitened data.
+
+    In more detail, given our whitened inverse obtained from SVD:
+
+    .. math::
+
+        \\tilde{M} = R^\\frac{1}{2}V\\Gamma U^T
+
+    The values in the diagonal matrix :math:`\\Gamma` are expressed in terms
+    of the chosen regularization :math:`\\lambda\\approx\\frac{1}{\\rm{SNR}^2}`
+    and singular values :math:`\\lambda_k` as:
+
+    .. math::
+
+        \\gamma_k = \\frac{1}{\\lambda_k}\\frac{\\lambda_k^2}{\\lambda_k^2 + \\lambda^2}
+
+    We also know that our predicted data is given by:
+
+    .. math::
+
+        \\hat{x}(t) = G\\hat{j}(t)=C^\\frac{1}{2}U\\Pi w(t)
+
+    And thus our predicted whitened data is just:
+
+    .. math::
+
+        \\hat{w}(t) = U\\Pi w(t)
+
+    Where :math:`\\Pi` is diagonal with entries:
+
+    .. math::
+
+        \\lambda_k\\gamma_k = \\frac{\\lambda_k^2}{\\lambda_k^2 + \\lambda^2}
+
+    If we use no regularization, note that :math:`\\Pi` is just the
+    identity matrix. Here we test the squared magnitude of the difference
+    between unregularized solution and regularized solutions, choosing the
+    biggest regularization that achieves a :math:`\\chi^2`-test significance
+    of 0.001.
+
+    .. versionadded:: 0.9.0
+    """  # noqa
+    from scipy.stats import chi2
+    _check_reference(evoked)
+    _check_ch_names(inv, evoked.info)
+    inv = prepare_inverse_operator(inv, evoked.nave, 1. / 9., 'MNE')
+    sel = _pick_channels_inverse_operator(evoked.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    data_white = np.dot(inv['whitener'], np.dot(inv['proj'], evoked.data[sel]))
+    data_white_ef = np.dot(inv['eigen_fields']['data'], data_white)
+    n_ch, n_times = data_white.shape
+
+    # Adapted from mne_analyze/regularization.c, compute_regularization
+    n_zero = (inv['noise_cov']['eig'] <= 0).sum()
+    logger.info('Effective nchan = %d - %d = %d'
+                % (n_ch, n_zero, n_ch - n_zero))
+    signal = np.sum(data_white ** 2, axis=0)  # sum of squares across channels
+    noise = n_ch - n_zero
+    snr = signal / noise
+
+    # Adapted from noise_regularization
+    lambda2_est = np.empty(n_times)
+    lambda2_est.fill(10.)
+    remaining = np.ones(n_times, bool)
+
+    # deal with low SNRs
+    bad = (snr <= 1)
+    lambda2_est[bad] = 100.
+    remaining[bad] = False
+
+    # parameters
+    lambda_mult = 0.9
+    sing2 = (inv['sing'] * inv['sing'])[:, np.newaxis]
+    val = chi2.isf(1e-3, n_ch - 1)
+    for n_iter in range(1000):
+        # get_mne_weights (ew=error_weights)
+        # (split newaxis creation here for old numpy)
+        f = sing2 / (sing2 + lambda2_est[np.newaxis][:, remaining])
+        f[inv['sing'] == 0] = 0
+        ew = data_white_ef[:, remaining] * (1.0 - f)
+        # check condition
+        err = np.sum(ew * ew, axis=0)
+        remaining[np.where(remaining)[0][err < val]] = False
+        if not remaining.any():
+            break
+        lambda2_est[remaining] *= lambda_mult
+    else:
+        warnings.warn('SNR estimation did not converge')
+    snr_est = 1.0 / np.sqrt(lambda2_est)
+    snr = np.sqrt(snr)
+    return snr, snr_est
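
Two of the additions above combine naturally: prepare_inverse_operator can
be called once and reused via prepared=True, and estimate_snr builds on the
same machinery. A hedged sketch, assuming evoked, epochs and inv (an
InverseOperator) were loaded elsewhere:

    from mne.minimum_norm import (prepare_inverse_operator, apply_inverse,
                                  apply_inverse_epochs, estimate_snr)

    lambda2 = 1. / 9.
    inv_prep = prepare_inverse_operator(inv, evoked.nave, lambda2, 'dSPM')

    # prepared=True skips the (redundant) re-preparation inside each call
    stc = apply_inverse(evoked, inv_prep, lambda2, 'dSPM', prepared=True)
    stcs = apply_inverse_epochs(epochs, inv_prep, lambda2, 'dSPM',
                                prepared=True)

    snr, snr_est = estimate_snr(evoked, inv)  # two (n_times,) arrays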
diff --git a/mne/minimum_norm/psf_ctf.py b/mne/minimum_norm/psf_ctf.py
index d317f46..7081159 100644
--- a/mne/minimum_norm/psf_ctf.py
+++ b/mne/minimum_norm/psf_ctf.py
@@ -8,14 +8,34 @@ from copy import deepcopy
 import numpy as np
 from scipy import linalg
 
+from ..io.pick import pick_channels
 from ..utils import logger, verbose
-from ..io.constants import FIFF
+from ..forward import convert_forward_solution
 from ..evoked import EvokedArray
 from ..source_estimate import SourceEstimate
 from .inverse import _subject_from_inverse
 from . import apply_inverse
 
 
+def _prepare_info(inverse_operator):
+    """Helper to get a usable dict"""
+    # To convert the sub-leadfield matrix to an evoked data type (pretending
+    # it's an epoch, see the loop below), use 'info' from the inverse
+    # solution, because it carries the correct projector information
+    info = deepcopy(inverse_operator['info'])
+    info['sfreq'] = 1000.  # required for the Evoked conversion below
+    info['projs'] = inverse_operator['projs']
+    return info
+
+
+def _pick_leadfield(leadfield, forward, ch_names):
+    """Helper to pick out correct lead field components"""
+    # NB must pick from fwd['sol']['row_names'], not ['info']['ch_names'],
+    # because ['sol']['data'] may be ordered differently from functional data
+    picks_fwd = pick_channels(forward['sol']['row_names'], ch_names)
+    return leadfield[picks_fwd]
+
+
 @verbose
 def point_spread_function(inverse_operator, forward, labels, method='dSPM',
                           lambda2=1 / 9., pick_ori=None, mode='mean',
@@ -29,11 +49,10 @@ def point_spread_function(inverse_operator, forward, labels, method='dSPM',
     Parameters
     ----------
     inverse_operator : instance of InverseOperator
-        Inverse operator read with mne.read_inverse_operator.
+        Inverse operator.
     forward : dict
-        Forward solution, created with "surf_ori=True" and "force_fixed=False"
-        Note: (Bad) channels not included in forward solution will not be used
-        in PSF computation.
+        Forward solution. Note: (Bad) channels not included in forward
+        solution will not be used in PSF computation.
     labels : list of Label
         Labels for which PSFs shall be computed.
     method : 'MNE' | 'dSPM' | 'sLORETA'
@@ -84,23 +103,11 @@ def point_spread_function(inverse_operator, forward, labels, method='dSPM',
 
     logger.info("About to process %d labels" % len(labels))
 
-    if not forward['surf_ori']:
-        raise RuntimeError('Forward has to be surface oriented '
-                           '(surf_ori=True).')
-
-    # get whole leadfield matrix with normal dipole components
-    if not (forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI):
-        # if forward solution already created with force_fixed=True
-        leadfield = forward['sol']['data']
-    else:  # pick normal components of forward solution
-        leadfield = forward['sol']['data'][:, 2::3]
-
-    # in order to convert sub-leadfield matrix to evoked data type (pretending
-    # it's an epoch, see in loop below), uses 'info' from forward solution,
-    # need to add 'sfreq' and 'proj'
-    info = deepcopy(forward['info'])
-    info['sfreq'] = 1000.  # add sfreq or it won't work
-    info['projs'] = []  # add projs
+    forward = convert_forward_solution(forward, force_fixed=False,
+                                       surf_ori=True)
+    info = _prepare_info(inverse_operator)
+    leadfield = _pick_leadfield(forward['sol']['data'][:, 2::3], forward,
+                                info['ch_names'])
 
     # will contain means of subleadfields for all labels
     label_psf_summary = []
@@ -158,8 +165,8 @@ def point_spread_function(inverse_operator, forward, labels, method='dSPM',
                         np.sum(s_svd * s_svd))
             logger.info("Your %d component(s) explain(s) %.1f%% "
                         "variance in label." % (n_svd_comp, comp_var))
-            this_label_psf_summary = (u_svd[:, :n_svd_comp]
-                                      * s_svd[:n_svd_comp][np.newaxis, :])
+            this_label_psf_summary = (u_svd[:, :n_svd_comp] *
+                                      s_svd[:n_svd_comp][np.newaxis, :])
             # transpose required for conversion to "evoked"
             this_label_psf_summary = this_label_psf_summary.T
 
@@ -185,8 +192,8 @@ def point_spread_function(inverse_operator, forward, labels, method='dSPM',
 
 
 def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
-                                      method='dSPM', lambda2=1. / 9., mode='mean',
-                                      n_svd_comp=1):
+                                      method='dSPM', lambda2=1. / 9.,
+                                      mode='mean', n_svd_comp=1):
     """Get inverse matrix from an inverse operator
 
     Currently works only for fixed/loose orientation constraints
@@ -196,9 +203,9 @@ def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
     Parameters
     ----------
     inverse_operator : instance of InverseOperator
-        Inverse operator read with mne.read_inverse_operator.
+        The inverse operator.
     forward : dict
-         The forward operator.
+        The forward operator.
     method : 'MNE' | 'dSPM' | 'sLORETA'
         Inverse methods (for apply_inverse).
     labels : list of Label | None
@@ -253,15 +260,10 @@ def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
     else:
         logger.info("Computing whole inverse operator.")
 
-    # in order to convert sub-leadfield matrix to evoked data type (pretending
-    # it's an epoch, see in loop below), uses 'info' from forward solution,
-    # need to add 'sfreq' and 'proj'
-    info = deepcopy(forward['info'])
-    info['sfreq'] = 1000.  # add sfreq or it won't work
-    info['projs'] = []  # add projs
+    info = _prepare_info(inverse_operator)
 
     # create identity matrix as input for inverse operator
-    id_mat = np.eye(forward['nchan'])
+    id_mat = np.eye(len(info['ch_names']))
 
     # convert identity matrix to evoked data type (pretending it's an epoch)
     ev_id = EvokedArray(id_mat, info=info, tmin=0.)
@@ -339,8 +341,8 @@ def _get_matrix_from_inverse_operator(inverse_operator, forward, labels=None,
                             np.sum(s_svd * s_svd))
                 logger.info("Your %d component(s) explain(s) %.1f%% "
                             "variance in label." % (n_svd_comp, comp_var))
-                this_invmat_summary = (u_svd[:, :n_svd_comp].T
-                                       * s_svd[:n_svd_comp][:, np.newaxis])
+                this_invmat_summary = (u_svd[:, :n_svd_comp].T *
+                                       s_svd[:n_svd_comp][:, np.newaxis])
 
             invmat_summary.append(this_invmat_summary)
 
@@ -364,15 +366,14 @@ def cross_talk_function(inverse_operator, forward, labels,
     Parameters
     ----------
     inverse_operator : instance of InverseOperator
-        Inverse operator read with mne.read_inverse_operator.
+        Inverse operator.
     forward : dict
-         Forward solution, created with "force_fixed=True"
-         Note: (Bad) channels not included in forward solution will not be used
-         in CTF computation.
-    method : 'MNE' | 'dSPM' | 'sLORETA'
-        Inverse method for which CTFs shall be computed.
+        Forward solution. Note: (Bad) channels not included in forward
+        solution will not be used in CTF computation.
     labels : list of Label
         Labels for which CTFs shall be computed.
+    method : 'MNE' | 'dSPM' | 'sLORETA'
+        Inverse method for which CTFs shall be computed.
     lambda2 : float
         The regularization parameter.
     signed : bool
@@ -402,6 +403,9 @@ def cross_talk_function(inverse_operator, forward, labels,
         (i.e. n_svd_comp successive time points in mne_analyze)
         The last sample is the summed CTF across all labels.
     """
+    forward = convert_forward_solution(forward, force_fixed=True,
+                                       surf_ori=True)
+
     # get the inverse matrix corresponding to inverse operator
     out = _get_matrix_from_inverse_operator(inverse_operator, forward,
                                             labels=labels, method=method,
@@ -410,7 +414,8 @@ def cross_talk_function(inverse_operator, forward, labels,
     invmat, label_singvals = out
 
     # get the leadfield matrix from forward solution
-    leadfield = forward['sol']['data']
+    leadfield = _pick_leadfield(forward['sol']['data'], forward,
+                                inverse_operator['info']['ch_names'])
 
     # compute cross-talk functions (CTFs)
     ctfs = np.dot(invmat, leadfield)
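
A note on what these psf_ctf.py hunks amount to: the inverse (resolution)
matrix is recovered by pushing an identity matrix through the inverse
operator, with each channel posing as one time sample, and the cross-talk
functions are then just that matrix times the leadfield. A minimal sketch of
the recipe, modeled on the code being removed above rather than on the new
private helpers (testing-dataset file names are borrowed from the test
modules below; the exact parameters are illustrative only):

    import os.path as op
    from copy import deepcopy

    import numpy as np

    from mne import EvokedArray, pick_types_forward, read_forward_solution
    from mne.datasets import testing
    from mne.minimum_norm import apply_inverse, read_inverse_operator

    s_path = op.join(testing.data_path(), 'MEG', 'sample')
    fwd = read_forward_solution(
        op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif'),
        force_fixed=True, surf_ori=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=False)  # match MEG inverse
    inv = read_inverse_operator(
        op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif'))

    # Identity matrix dressed up as evoked data: applying the inverse to it
    # yields the inverse matrix, one channel per "time sample".
    info = deepcopy(fwd['info'])
    info['sfreq'] = 1000.  # dummy sampling rate, as in the removed code
    info['projs'] = []
    ev_id = EvokedArray(np.eye(fwd['nchan']), info=info, tmin=0.)
    stc = apply_inverse(ev_id, inv, lambda2=1. / 9., method='dSPM')

    # Cross-talk functions: inverse matrix times leadfield (n_src x n_src).
    ctfs = np.dot(stc.data, fwd['sol']['data'])
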
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
index 31cccd2..22747ce 100644
--- a/mne/minimum_norm/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -1,51 +1,75 @@
 from __future__ import print_function
 import os.path as op
 import numpy as np
-from numpy.testing import assert_array_almost_equal, assert_equal
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_allclose, assert_array_equal)
 from scipy import sparse
 from nose.tools import assert_true, assert_raises
 import copy
 import warnings
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne.label import read_label, label_sign_flip
 from mne.event import read_events
 from mne.epochs import Epochs
 from mne.source_estimate import read_source_estimate, VolSourceEstimate
-from mne import read_cov, read_forward_solution, read_evokeds, pick_types
+from mne import (read_cov, read_forward_solution, read_evokeds, pick_types,
+                 pick_types_forward, make_forward_solution,
+                 convert_forward_solution, Covariance)
 from mne.io import Raw
 from mne.minimum_norm.inverse import (apply_inverse, read_inverse_operator,
                                       apply_inverse_raw, apply_inverse_epochs,
                                       make_inverse_operator,
                                       write_inverse_operator,
-                                      compute_rank_inverse)
-from mne.utils import _TempDir
-from ...externals import six
-
-s_path = op.join(sample.data_path(download=False), 'MEG', 'sample')
-fname_inv = op.join(s_path, 'sample_audvis-meg-oct-6-meg-inv.fif')
-fname_inv_fixed = op.join(s_path, 'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
-fname_inv_nodepth = op.join(s_path,
-                            'sample_audvis-meg-oct-6-meg-nodepth'
-                            '-fixed-inv.fif')
-fname_inv_diag = op.join(s_path,
-                         'sample_audvis-meg-oct-6-meg-diagnoise-inv.fif')
-fname_vol_inv = op.join(s_path, 'sample_audvis-meg-vol-7-meg-inv.fif')
-fname_data = op.join(s_path, 'sample_audvis-ave.fif')
-fname_cov = op.join(s_path, 'sample_audvis-cov.fif')
-fname_fwd = op.join(s_path, 'sample_audvis-meg-oct-6-fwd.fif')
-fname_fwd_meeg = op.join(s_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
-fname_raw = op.join(s_path, 'sample_audvis_filt-0-40_raw.fif')
-fname_event = op.join(s_path, 'sample_audvis_filt-0-40_raw-eve.fif')
+                                      compute_rank_inverse,
+                                      prepare_inverse_operator)
+from mne.utils import _TempDir, run_tests_if_main, slow_test
+from mne.externals import six
+
+test_path = testing.data_path(download=False)
+s_path = op.join(test_path, 'MEG', 'sample')
+fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+# Four inverses:
+fname_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_inv = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+fname_inv_fixed_nodepth = op.join(s_path,
+                                  'sample_audvis_trunc-meg-eeg-oct-4-meg'
+                                  '-nodepth-fixed-inv.fif')
+fname_inv_meeg_diag = op.join(s_path,
+                              'sample_audvis_trunc-'
+                              'meg-eeg-oct-4-meg-eeg-diagnoise-inv.fif')
+
+fname_data = op.join(s_path, 'sample_audvis_trunc-ave.fif')
+fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
+fname_raw = op.join(s_path, 'sample_audvis_trunc_raw.fif')
+fname_event = op.join(s_path, 'sample_audvis_trunc_raw-eve.fif')
 fname_label = op.join(s_path, 'labels', '%s.label')
+fname_vol_inv = op.join(s_path,
+                        'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
+# trans and bem needed for channel reordering tests incl. forward computation
+fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif')
+s_path_bem = op.join(test_path, 'subjects', 'sample', 'bem')
+fname_bem = op.join(s_path_bem, 'sample-320-320-320-bem-sol.fif')
+src_fname = op.join(s_path_bem, 'sample-oct-4-src.fif')
 
 snr = 3.0
 lambda2 = 1.0 / snr ** 2
 
-tempdir = _TempDir()
 last_keys = [None] * 10
 
 
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=True, eeg=False)
+    return fwd
+
+
+def read_forward_solution_eeg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=False, eeg=True)
+    return fwd
+
+
 def _get_evoked():
     evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
     evoked.crop(0, 0.2)
@@ -60,7 +84,7 @@ def _compare(a, b):
         if isinstance(a, dict):
             assert_true(isinstance(b, dict))
             for k, v in six.iteritems(a):
-                if not k in b and k not in skip_types:
+                if k not in b and k not in skip_types:
                     raise ValueError('First one had a key the second one '
                                      'didn\'t:\n%s not in %s' % (k, b.keys()))
                 if k not in skip_types:
@@ -68,7 +92,7 @@ def _compare(a, b):
                     last_keys = [k] + last_keys
                     _compare(v, b[k])
             for k, v in six.iteritems(b):
-                if not k in a and k not in skip_types:
+                if k not in a and k not in skip_types:
                     raise ValueError('Second one had a key the first one '
                                      'didn\'t:\n%s not in %s' % (k, a.keys()))
         elif isinstance(a, list):
@@ -88,13 +112,13 @@ def _compare(a, b):
         raise exptn
 
 
-def _compare_inverses_approx(inv_1, inv_2, evoked, stc_decimals,
+def _compare_inverses_approx(inv_1, inv_2, evoked, rtol, atol,
                              check_depth=True):
     # depth prior
     if check_depth:
         if inv_1['depth_prior'] is not None:
             assert_array_almost_equal(inv_1['depth_prior']['data'],
-                                      inv_2['depth_prior']['data'])
+                                      inv_2['depth_prior']['data'], 5)
         else:
             assert_true(inv_2['depth_prior'] is None)
     # orient prior
@@ -118,11 +142,12 @@ def _compare_inverses_approx(inv_1, inv_2, evoked, stc_decimals,
 
     assert_true(stc_1.subject == stc_2.subject)
     assert_equal(stc_1.times, stc_2.times)
-    assert_array_almost_equal(stc_1.data, stc_2.data, stc_decimals)
+    assert_allclose(stc_1.data, stc_2.data, rtol=rtol, atol=atol)
     assert_true(inv_1['units'] == inv_2['units'])
 
 
 def _compare_io(inv_op, out_file_ext='.fif'):
+    tempdir = _TempDir()
     if out_file_ext == '.fif':
         out_file = op.join(tempdir, 'test-inv.fif')
     elif out_file_ext == '.gz':
@@ -137,53 +162,129 @@ def _compare_io(inv_op, out_file_ext='.fif'):
     _compare(inv_init, inv_op)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_warn_inverse_operator():
     """Test MNE inverse warning without average EEG projection
     """
     bad_info = copy.deepcopy(_get_evoked().info)
     bad_info['projs'] = list()
-    fwd_op = read_forward_solution(fname_fwd_meeg, surf_ori=True)
+    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
     noise_cov = read_cov(fname_cov)
     with warnings.catch_warnings(record=True) as w:
         make_inverse_operator(bad_info, fwd_op, noise_cov)
     assert_equal(len(w), 1)
 
 
-@sample.requires_sample_data
-def test_apply_inverse_operator():
+@slow_test
+@testing.requires_testing_data
+def test_make_inverse_operator():
     """Test MNE inverse computation (precomputed and non-precomputed)
     """
-    inverse_operator = read_inverse_operator(fname_inv)
+    # Test old version of inverse computation starting from forward operator
     evoked = _get_evoked()
     noise_cov = read_cov(fname_cov)
-
-    # Test old version of inverse computation starting from forward operator
-    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    inverse_operator = read_inverse_operator(fname_inv)
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
     my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                       loose=0.2, depth=0.8,
                                       limit_depth_chs=False)
     _compare_io(my_inv_op)
     assert_true(inverse_operator['units'] == 'Am')
-    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2,
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2,
                              check_depth=False)
-    # Inverse has 306 channels - 4 proj = 302
-    assert_true(compute_rank_inverse(inverse_operator) == 302)
-
     # Test MNE inverse computation starting from forward operator
     my_inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov,
                                       loose=0.2, depth=0.8)
     _compare_io(my_inv_op)
-    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 2)
+    _compare_inverses_approx(my_inv_op, inverse_operator, evoked, 1e-2, 1e-2)
+    assert_true('dev_head_t' in my_inv_op['info'])
+    assert_true('mri_head_t' in my_inv_op)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_inverse_operator_channel_ordering():
+    """Test MNE inverse computation is immune to channel reorderings
+    """
+    # These are with original ordering
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    fwd_orig = make_forward_solution(evoked.info, fname_trans, src_fname,
+                                     fname_bem, eeg=True, mindist=5.0)
+    fwd_orig = convert_forward_solution(fwd_orig, surf_ori=True)
+    inv_orig = make_inverse_operator(evoked.info, fwd_orig, noise_cov,
+                                     loose=0.2, depth=0.8,
+                                     limit_depth_chs=False)
+    stc_1 = apply_inverse(evoked, inv_orig, lambda2, "dSPM")
+
+    # Assume that a raw reordering applies to both evoked and noise_cov,
+    # so we don't need to create those from scratch. Just reorder them,
+    # then try to apply the original inverse operator
+    new_order = np.arange(len(evoked.info['ch_names']))
+    randomiser = np.random.RandomState(42)
+    randomiser.shuffle(new_order)
+    evoked.data = evoked.data[new_order]
+    evoked.info['ch_names'] = [evoked.info['ch_names'][n] for n in new_order]
+    evoked.info['chs'] = [evoked.info['chs'][n] for n in new_order]
+
+    cov_ch_reorder = [c for c in evoked.info['ch_names']
+                      if (c in noise_cov.ch_names)]
+
+    new_order_cov = [noise_cov.ch_names.index(name) for name in cov_ch_reorder]
+    noise_cov['data'] = noise_cov.data[np.ix_(new_order_cov, new_order_cov)]
+    noise_cov['names'] = [noise_cov['names'][idx] for idx in new_order_cov]
+
+    fwd_reorder = make_forward_solution(evoked.info, fname_trans, src_fname,
+                                        fname_bem, eeg=True, mindist=5.0)
+    fwd_reorder = convert_forward_solution(fwd_reorder, surf_ori=True)
+    inv_reorder = make_inverse_operator(evoked.info, fwd_reorder, noise_cov,
+                                        loose=0.2, depth=0.8,
+                                        limit_depth_chs=False)
+
+    stc_2 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
+
+    assert_equal(stc_1.subject, stc_2.subject)
+    assert_array_equal(stc_1.times, stc_2.times)
+    assert_allclose(stc_1.data, stc_2.data, rtol=1e-5, atol=1e-5)
+    assert_true(inv_orig['units'] == inv_reorder['units'])
+
+    # Reload with original ordering & apply reordered inverse
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+
+    stc_3 = apply_inverse(evoked, inv_reorder, lambda2, "dSPM")
+    assert_allclose(stc_1.data, stc_3.data, rtol=1e-5, atol=1e-5)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_apply_inverse_operator():
+    """Test MNE inverse application
+    """
+    inverse_operator = read_inverse_operator(fname_full)
+    evoked = _get_evoked()
+
     # Inverse has 306 channels - 4 proj = 302
     assert_true(compute_rank_inverse(inverse_operator) == 302)
 
     stc = apply_inverse(evoked, inverse_operator, lambda2, "MNE")
     assert_true(stc.subject == 'sample')
     assert_true(stc.data.min() > 0)
-    assert_true(stc.data.max() < 10e-10)
+    assert_true(stc.data.max() < 10e-9)
     assert_true(stc.data.mean() > 1e-11)
 
+    # test if using prepared and not prepared inverse operator give the same
+    # result
+    inv_op = prepare_inverse_operator(inverse_operator, nave=evoked.nave,
+                                      lambda2=lambda2, method="MNE")
+    stc2 = apply_inverse(evoked, inv_op, lambda2, "MNE")
+    assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.times, stc2.times)
+
     stc = apply_inverse(evoked, inverse_operator, lambda2, "sLORETA")
     assert_true(stc.subject == 'sample')
     assert_true(stc.data.min() > 0)
@@ -196,23 +297,32 @@ def test_apply_inverse_operator():
     assert_true(stc.data.max() < 35)
     assert_true(stc.data.mean() > 0.1)
 
-    my_stc = apply_inverse(evoked, my_inv_op, lambda2, "dSPM")
-
-    assert_true('dev_head_t' in my_inv_op['info'])
-    assert_true('mri_head_t' in my_inv_op)
+    # test without using a label (so delayed computation is used)
+    label = read_label(fname_label % 'Aud-lh')
+    stc = apply_inverse(evoked, inv_op, lambda2, "MNE")
+    stc_label = apply_inverse(evoked, inv_op, lambda2, "MNE",
+                              label=label)
+    assert_equal(stc_label.subject, 'sample')
+    label_stc = stc.in_label(label)
+    assert_true(label_stc.subject == 'sample')
+    assert_array_almost_equal(stc_label.data, label_stc.data)
 
-    assert_true(my_stc.subject == 'sample')
-    assert_equal(stc.times, my_stc.times)
-    assert_array_almost_equal(stc.data, my_stc.data, 2)
+    # Test we get errors when using custom ref or no average proj is present
+    evoked.info['custom_ref_applied'] = True
+    assert_raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")
+    evoked.info['custom_ref_applied'] = False
+    evoked.info['projs'] = []  # remove EEG proj
+    assert_raises(ValueError, apply_inverse, evoked, inv_op, lambda2, "MNE")
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_make_inverse_operator_fixed():
     """Test MNE inverse computation (fixed orientation)
     """
-    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
-    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
-    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
+    fwd_1 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=False)
+    fwd_2 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=True)
     evoked = _get_evoked()
     noise_cov = read_cov(fname_cov)
 
@@ -223,33 +333,26 @@ def test_make_inverse_operator_fixed():
     assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
                   noise_cov, depth=0.8, loose=None, fixed=True)
 
-    # compare to C solution w/fixed
-    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=0.8,
-                                   loose=None, fixed=True)
-    _compare_io(inv_op)
-    inverse_operator_fixed = read_inverse_operator(fname_inv_fixed)
-    _compare_inverses_approx(inverse_operator_fixed, inv_op, evoked, 2)
-    # Inverse has 306 channels - 4 proj = 302
-    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)
-
     # now compare to C solution
     # note that the forward solution must not be surface-oriented
     # to get equivalency (surf_ori=True changes the normals)
     inv_op = make_inverse_operator(evoked.info, fwd_2, noise_cov, depth=None,
                                    loose=None, fixed=True)
-    inverse_operator_nodepth = read_inverse_operator(fname_inv_nodepth)
-    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 2)
-    # Inverse has 306 channels - 4 proj = 302
-    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)
+    inverse_operator_nodepth = read_inverse_operator(fname_inv_fixed_nodepth)
+    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 0, 1e-2)
+    # Inverse has 306 channels - 4 proj = 302
+    assert_true(compute_rank_inverse(inverse_operator_nodepth) == 302)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_make_inverse_operator_free():
     """Test MNE inverse computation (free orientation)
     """
-    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
-    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
-    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    fwd_1 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=False)
+    fwd_2 = read_forward_solution_meg(fname_fwd, surf_ori=False,
+                                      force_fixed=True)
     evoked = _get_evoked()
     noise_cov = read_cov(fname_cov)
 
@@ -260,17 +363,17 @@ def test_make_inverse_operator_free():
     # for free ori inv, loose=None and loose=1 should be equivalent
     inv_1 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=None)
     inv_2 = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=1)
-    _compare_inverses_approx(inv_1, inv_2, evoked, 2)
+    _compare_inverses_approx(inv_1, inv_2, evoked, 0, 1e-2)
 
     # for depth=None, surf_ori of the fwd should not matter
     inv_3 = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=None,
                                   loose=None)
     inv_4 = make_inverse_operator(evoked.info, fwd_1, noise_cov, depth=None,
                                   loose=None)
-    _compare_inverses_approx(inv_3, inv_4, evoked, 2)
+    _compare_inverses_approx(inv_3, inv_4, evoked, 0, 1e-2)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_make_inverse_operator_diag():
     """Test MNE inverse computation with diagonal noise cov
     """
@@ -280,20 +383,37 @@ def test_make_inverse_operator_diag():
     inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov.as_diag(),
                                    loose=0.2, depth=0.8)
     _compare_io(inv_op)
-    inverse_operator_diag = read_inverse_operator(fname_inv_diag)
+    inverse_operator_diag = read_inverse_operator(fname_inv_meeg_diag)
     # This one's only good to zero decimal places, roundoff error (?)
-    _compare_inverses_approx(inverse_operator_diag, inv_op, evoked, 0)
-    # Inverse has 306 channels - 4 proj = 302
-    assert_true(compute_rank_inverse(inverse_operator_diag) == 302)
+    _compare_inverses_approx(inverse_operator_diag, inv_op, evoked, 0, 1e0)
+    # Inverse has 366 channels - 6 proj = 360
+    assert_true(compute_rank_inverse(inverse_operator_diag) == 360)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
+def test_inverse_operator_noise_cov_rank():
+    """Test MNE inverse operator with a specified noise cov rank
+    """
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
+    evoked = _get_evoked()
+    noise_cov = read_cov(fname_cov)
+    inv = make_inverse_operator(evoked.info, fwd_op, noise_cov, rank=64)
+    assert_true(compute_rank_inverse(inv) == 64)
+
+    fwd_op = read_forward_solution_eeg(fname_fwd, surf_ori=True)
+    inv = make_inverse_operator(evoked.info, fwd_op, noise_cov,
+                                rank=dict(eeg=20))
+    assert_true(compute_rank_inverse(inv) == 20)
+
+
+@testing.requires_testing_data
 def test_inverse_operator_volume():
     """Test MNE inverse computation on volume source space
     """
+    tempdir = _TempDir()
     evoked = _get_evoked()
     inverse_operator_vol = read_inverse_operator(fname_vol_inv)
-    _compare_io(inverse_operator_vol)
+    assert_true(repr(inverse_operator_vol))
     stc = apply_inverse(evoked, inverse_operator_vol, lambda2, "dSPM")
     assert_true(isinstance(stc, VolSourceEstimate))
     # volume inverses don't have associated subject IDs
@@ -306,12 +426,16 @@ def test_inverse_operator_volume():
     assert_array_almost_equal(stc.times, stc2.times)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_io_inverse_operator():
-    """Test IO of inverse_operator with GZip
+    """Test IO of inverse_operator
     """
+    tempdir = _TempDir()
     inverse_operator = read_inverse_operator(fname_inv)
-    print(inverse_operator)
+    x = repr(inverse_operator)
+    assert_true(x)
+    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
     # just do one example for .gz, as it should generalize
     _compare_io(inverse_operator, '.gz')
 
@@ -323,8 +447,20 @@ def test_io_inverse_operator():
         read_inverse_operator(inv_badname)
     assert_true(len(w) == 2)
 
+    # make sure we can write and read
+    inv_fname = op.join(tempdir, 'test-inv.fif')
+    args = (10, 1. / 9., 'dSPM')
+    inv_prep = prepare_inverse_operator(inverse_operator, *args)
+    write_inverse_operator(inv_fname, inv_prep)
+    inv_read = read_inverse_operator(inv_fname)
+    _compare(inverse_operator, inv_read)
+    inv_read_prep = prepare_inverse_operator(inv_read, *args)
+    _compare(inv_prep, inv_read_prep)
+    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
+    _compare(inv_prep, inv_prep_prep)
+
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_apply_mne_inverse_raw():
     """Test MNE with precomputed inverse operator on Raw
     """
@@ -333,16 +469,19 @@ def test_apply_mne_inverse_raw():
     raw = Raw(fname_raw)
     label_lh = read_label(fname_label % 'Aud-lh')
     _, times = raw[0, start:stop]
-    inverse_operator = read_inverse_operator(fname_inv)
+    inverse_operator = read_inverse_operator(fname_full)
+    inverse_operator = prepare_inverse_operator(inverse_operator, nave=1,
+                                                lambda2=lambda2, method="dSPM")
     for pick_ori in [None, "normal"]:
         stc = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
                                 label=label_lh, start=start, stop=stop, nave=1,
-                                pick_ori=pick_ori, buffer_size=None)
+                                pick_ori=pick_ori, buffer_size=None,
+                                prepared=True)
 
         stc2 = apply_inverse_raw(raw, inverse_operator, lambda2, "dSPM",
                                  label=label_lh, start=start, stop=stop,
                                  nave=1, pick_ori=pick_ori,
-                                 buffer_size=3)
+                                 buffer_size=3, prepared=True)
 
         if pick_ori is None:
             assert_true(np.all(stc.data > 0))
@@ -355,7 +494,7 @@ def test_apply_mne_inverse_raw():
         assert_array_almost_equal(stc.data, stc2.data)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_apply_mne_inverse_fixed_raw():
     """Test MNE with fixed-orientation inverse operator on Raw
     """
@@ -366,31 +505,40 @@ def test_apply_mne_inverse_fixed_raw():
     label_lh = read_label(fname_label % 'Aud-lh')
 
     # create a fixed-orientation inverse operator
-    fwd = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=False,
+                                    surf_ori=True)
     noise_cov = read_cov(fname_cov)
     inv_op = make_inverse_operator(raw.info, fwd, noise_cov,
                                    loose=None, depth=0.8, fixed=True)
 
-    stc = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+    inv_op2 = prepare_inverse_operator(inv_op, nave=1,
+                                       lambda2=lambda2, method="dSPM")
+    stc = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
                             label=label_lh, start=start, stop=stop, nave=1,
-                            pick_ori=None, buffer_size=None)
+                            pick_ori=None, buffer_size=None, prepared=True)
 
-    stc2 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+    stc2 = apply_inverse_raw(raw, inv_op2, lambda2, "dSPM",
                              label=label_lh, start=start, stop=stop, nave=1,
-                             pick_ori=None, buffer_size=3)
+                             pick_ori=None, buffer_size=3, prepared=True)
+
+    stc3 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
+                             label=label_lh, start=start, stop=stop, nave=1,
+                             pick_ori=None, buffer_size=None)
 
     assert_true(stc.subject == 'sample')
     assert_true(stc2.subject == 'sample')
     assert_array_almost_equal(stc.times, times)
     assert_array_almost_equal(stc2.times, times)
+    assert_array_almost_equal(stc3.times, times)
     assert_array_almost_equal(stc.data, stc2.data)
+    assert_array_almost_equal(stc.data, stc3.data)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_apply_mne_inverse_epochs():
     """Test MNE with precomputed inverse operator on Epochs
     """
-    inverse_operator = read_inverse_operator(fname_inv)
+    inverse_operator = read_inverse_operator(fname_full)
     label_lh = read_label(fname_label % 'Aud-lh')
     label_rh = read_label(fname_label % 'Aud-rh')
     event_id, tmin, tmax = 1, -0.2, 0.5
@@ -406,8 +554,17 @@ def test_apply_mne_inverse_epochs():
                     baseline=(None, 0), reject=reject, flat=flat)
     stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
                                 label=label_lh, pick_ori="normal")
-
-    assert_true(len(stcs) == 4)
+    inverse_operator = prepare_inverse_operator(inverse_operator, nave=1,
+                                                lambda2=lambda2, method="dSPM")
+    stcs2 = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
+                                 label=label_lh, pick_ori="normal",
+                                 prepared=True)
+    # test if using prepared and not prepared inverse operator give the same
+    # result
+    assert_array_almost_equal(stcs[0].data, stcs2[0].data)
+    assert_array_almost_equal(stcs[0].times, stcs2[0].times)
+
+    assert_true(len(stcs) == 2)
     assert_true(3 < stcs[0].data.max() < 10)
     assert_true(stcs[0].subject == 'sample')
 
@@ -420,11 +577,14 @@ def test_apply_mne_inverse_epochs():
     assert_true(label_mean.max() < label_mean_flip.max())
 
     # test extracting a BiHemiLabel
+
     stcs_rh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
-                                   label=label_rh, pick_ori="normal")
+                                   label=label_rh, pick_ori="normal",
+                                   prepared=True)
     stcs_bh = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
                                    label=label_lh + label_rh,
-                                   pick_ori="normal")
+                                   pick_ori="normal",
+                                   prepared=True)
 
     n_lh = len(stcs[0].data)
     assert_array_almost_equal(stcs[0].data, stcs_bh[0].data[:n_lh])
@@ -432,18 +592,18 @@ def test_apply_mne_inverse_epochs():
 
     # test without using a label (so delayed computation is used)
     stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, "dSPM",
-                                pick_ori="normal")
+                                pick_ori="normal", prepared=True)
     assert_true(stcs[0].subject == 'sample')
     label_stc = stcs[0].in_label(label_rh)
     assert_true(label_stc.subject == 'sample')
     assert_array_almost_equal(stcs_rh[0].data, label_stc.data)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_make_inverse_operator_bads():
     """Test MNE inverse computation given a mismatch of bad channels
     """
-    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
+    fwd_op = read_forward_solution_meg(fname_fwd, surf_ori=True)
     evoked = _get_evoked()
     noise_cov = read_cov(fname_cov)
 
@@ -455,5 +615,7 @@ def test_make_inverse_operator_bads():
     evoked.info['bads'].append(bad)
 
     assert_true(len(set(inv_['info']['ch_names']) - union_good) == 0)
-
     assert_true(len(set(inv_['info']['bads']) - union_bads) == 0)
+
+
+run_tests_if_main()
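
One pattern recurs through the updated tests and deserves a note:
prepare_inverse_operator is now called once up front, and the tests assert
that the prepared and unprepared paths produce the same source estimates. A
minimal sketch of that usage under the same assumptions as the tests
(testing dataset, SNR of 3):

    import os.path as op

    from mne import read_evokeds
    from mne.datasets import testing
    from mne.minimum_norm import (apply_inverse, prepare_inverse_operator,
                                  read_inverse_operator)

    s_path = op.join(testing.data_path(), 'MEG', 'sample')
    evoked = read_evokeds(op.join(s_path, 'sample_audvis_trunc-ave.fif'),
                          condition=0, baseline=(None, 0))
    inv = read_inverse_operator(
        op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif'))

    lambda2 = 1.0 / 3.0 ** 2
    # Prepare once (nave scaling, regularization, noise normalization) ...
    inv_prep = prepare_inverse_operator(inv, nave=evoked.nave,
                                        lambda2=lambda2, method='MNE')
    # ... then apply; the two estimates should agree to numerical precision.
    stc1 = apply_inverse(evoked, inv, lambda2, 'MNE')
    stc2 = apply_inverse(evoked, inv_prep, lambda2, 'MNE')
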
diff --git a/mne/minimum_norm/tests/test_psf_ctf.py b/mne/minimum_norm/tests/test_psf_ctf.py
index faaf6cd..78702e2 100644
--- a/mne/minimum_norm/tests/test_psf_ctf.py
+++ b/mne/minimum_norm/tests/test_psf_ctf.py
@@ -1,16 +1,21 @@
 
 import os.path as op
 import mne
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import read_forward_solution
 from mne.minimum_norm import (read_inverse_operator,
                               point_spread_function, cross_talk_function)
+from mne.utils import slow_test, run_tests_if_main
 
 from nose.tools import assert_true
 
-data_path = op.join(sample.data_path(download=False), 'MEG', 'sample')
-fname_inv = op.join(data_path, 'sample_audvis-meg-oct-6-meg-inv.fif')
-fname_fwd = op.join(data_path, 'sample_audvis-meg-oct-6-fwd.fif')
+data_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+
+fname_inv_meg = op.join(data_path,
+                        'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+fname_inv_meeg = op.join(data_path, 'sample_audvis_trunc-meg-eeg-oct-4-'
+                         'meg-eeg-diagnoise-inv.fif')
+fname_fwd = op.join(data_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
 
 fname_label = [op.join(data_path, 'labels', 'Aud-rh.label'),
                op.join(data_path, 'labels', 'Aud-lh.label')]
@@ -19,61 +24,58 @@ snr = 3.0
 lambda2 = 1.0 / snr ** 2
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_psf_ctf():
     """Test computation of PSFs and CTFs for linear estimators
     """
-
-    inverse_operator = read_inverse_operator(fname_inv)
-    forward = read_forward_solution(fname_fwd, force_fixed=False,
-                                    surf_ori=True)
+    forward = read_forward_solution(fname_fwd)
     labels = [mne.read_label(ss) for ss in fname_label]
 
     method = 'MNE'
     n_svd_comp = 2
 
-    # Test PSFs (then CTFs)
-    for mode in ('sum', 'svd'):
-        stc_psf, psf_ev = point_spread_function(inverse_operator,
-                                                forward,
-                                                method=method,
-                                                labels=labels,
-                                                lambda2=lambda2,
-                                                pick_ori='normal',
-                                                mode=mode,
-                                                n_svd_comp=n_svd_comp)
-
-        n_vert, n_samples = stc_psf.shape
-        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
-                         inverse_operator['src'][0]['vertno'].shape[0])
-        if mode == 'svd':
-            should_n_samples = len(labels) * n_svd_comp + 1
-        else:
-            should_n_samples = len(labels) + 1
-
-        assert_true(n_vert == should_n_vert)
-        assert_true(n_samples == should_n_samples)
-
-        n_chan, n_samples = psf_ev.data.shape
-        assert_true(n_chan == forward['nchan'])
-
-    forward = read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
-
-    # Test CTFs
-    for mode in ('sum', 'svd'):
-        stc_ctf = cross_talk_function(inverse_operator, forward,
-                                      labels, method=method,
-                                      lambda2=lambda2,
-                                      signed=False, mode=mode,
-                                      n_svd_comp=n_svd_comp)
-
-        n_vert, n_samples = stc_ctf.shape
-        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
-                         inverse_operator['src'][0]['vertno'].shape[0])
-        if mode == 'svd':
-            should_n_samples = len(labels) * n_svd_comp + 1
-        else:
-            should_n_samples = len(labels) + 1
-
-        assert_true(n_vert == should_n_vert)
-        assert_true(n_samples == should_n_samples)
+    # make sure it works for both types of inverses
+    for fname_inv in (fname_inv_meg, fname_inv_meeg):
+        inverse_operator = read_inverse_operator(fname_inv)
+        # Test PSFs (then CTFs)
+        for mode in ('sum', 'svd'):
+            stc_psf, psf_ev = point_spread_function(
+                inverse_operator, forward, method=method, labels=labels,
+                lambda2=lambda2, pick_ori='normal', mode=mode,
+                n_svd_comp=n_svd_comp)
+
+            n_vert, n_samples = stc_psf.shape
+            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                             inverse_operator['src'][0]['vertno'].shape[0])
+            if mode == 'svd':
+                should_n_samples = len(labels) * n_svd_comp + 1
+            else:
+                should_n_samples = len(labels) + 1
+
+            assert_true(n_vert == should_n_vert)
+            assert_true(n_samples == should_n_samples)
+
+            n_chan, n_samples = psf_ev.data.shape
+            assert_true(n_chan == forward['nchan'])
+
+        # Test CTFs
+        for mode in ('sum', 'svd'):
+            stc_ctf = cross_talk_function(
+                inverse_operator, forward, labels, method=method,
+                lambda2=lambda2, signed=False, mode=mode,
+                n_svd_comp=n_svd_comp)
+
+            n_vert, n_samples = stc_ctf.shape
+            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
+                             inverse_operator['src'][0]['vertno'].shape[0])
+            if mode == 'svd':
+                should_n_samples = len(labels) * n_svd_comp + 1
+            else:
+                should_n_samples = len(labels) + 1
+
+            assert_true(n_vert == should_n_vert)
+            assert_true(n_samples == should_n_samples)
+
+
+run_tests_if_main()
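
The shape assertions in this test encode the output convention shared by
point_spread_function and cross_talk_function: one output sample per label
in 'sum' mode, n_svd_comp samples per label in 'svd' mode, plus one trailing
sample holding the sum across all labels. The same rule written out as a
small helper (hypothetical, only to make the convention explicit):

    def expected_psf_ctf_shape(inverse_operator, labels, mode, n_svd_comp):
        """Expected (n_vertices, n_samples) of a PSF/CTF source estimate."""
        n_vert = sum(src['vertno'].shape[0]
                     for src in inverse_operator['src'])
        n_per_label = n_svd_comp if mode == 'svd' else 1
        # the extra final sample is the PSF/CTF summed across all labels
        return n_vert, len(labels) * n_per_label + 1
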
diff --git a/mne/minimum_norm/tests/test_snr.py b/mne/minimum_norm/tests/test_snr.py
new file mode 100644
index 0000000..ebbd776
--- /dev/null
+++ b/mne/minimum_norm/tests/test_snr.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Authors: Eric Larson <larson.eric.d@gmail.com>
+#          Matti Hamalainen <msh@nmr.mgh.harvard.edu>
+#
+# License: BSD (3-clause)
+
+import os
+from os import path as op
+import numpy as np
+from numpy.testing import assert_allclose
+
+from mne import read_evokeds
+from mne.datasets import testing
+from mne.minimum_norm import read_inverse_operator, estimate_snr
+
+from mne.utils import _TempDir, requires_mne, run_subprocess
+
+s_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fname_inv = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_evoked = op.join(s_path, 'sample_audvis-ave.fif')
+
+
+@testing.requires_testing_data
+@requires_mne
+def test_snr():
+    """Test SNR calculation"""
+    tempdir = _TempDir()
+    inv = read_inverse_operator(fname_inv)
+    evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
+    snr = estimate_snr(evoked, inv)[0]
+    orig_dir = os.getcwd()
+    os.chdir(tempdir)
+    try:
+        cmd = ['mne_compute_mne', '--inv', fname_inv, '--meas', fname_evoked,
+               '--snronly', '--bmin', '-200', '--bmax', '0']
+        run_subprocess(cmd)
+    except Exception:
+        pass  # this returns 1 for some reason
+    finally:
+        os.chdir(orig_dir)
+    snr_c = np.loadtxt(op.join(tempdir, 'SNR'))[:, 1]
+    assert_allclose(snr, snr_c, atol=1e-2, rtol=1e-2)
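
Stripped of the mne_compute_mne cross-check, the Python side of this new
test reduces to the call below. The [0] indexing above implies estimate_snr
returns the SNR time course first; the name of the second return value here
is a guess made only so the tuple can be unpacked:

    import os.path as op

    from mne import read_evokeds
    from mne.datasets import testing
    from mne.minimum_norm import estimate_snr, read_inverse_operator

    s_path = op.join(testing.data_path(), 'MEG', 'sample')
    inv = read_inverse_operator(
        op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif'))
    evoked = read_evokeds(op.join(s_path, 'sample_audvis-ave.fif'),
                          baseline=(None, 0))[0]
    snr, snr_est = estimate_snr(evoked, inv)  # per-sample SNR estimates
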
diff --git a/mne/minimum_norm/tests/test_time_frequency.py b/mne/minimum_norm/tests/test_time_frequency.py
index e8dd78d..c20e0d3 100644
--- a/mne/minimum_norm/tests/test_time_frequency.py
+++ b/mne/minimum_norm/tests/test_time_frequency.py
@@ -3,12 +3,15 @@ import os.path as op
 import numpy as np
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
+import warnings
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import io, find_events, Epochs, pick_types
+from mne.utils import run_tests_if_main
 from mne.label import read_label
 from mne.minimum_norm.inverse import (read_inverse_operator,
-                                      apply_inverse_epochs)
+                                      apply_inverse_epochs,
+                                      prepare_inverse_operator)
 from mne.minimum_norm.time_frequency import (source_band_induced_power,
                                              source_induced_power,
                                              compute_source_psd,
@@ -17,15 +20,16 @@ from mne.minimum_norm.time_frequency import (source_band_induced_power,
 
 from mne.time_frequency import multitaper_psd
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 fname_inv = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-meg-inv.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
 fname_data = op.join(data_path, 'MEG', 'sample',
-                     'sample_audvis_raw.fif')
+                     'sample_audvis_trunc_raw.fif')
 fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
+warnings.simplefilter('always')
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_tfr_with_inverse_operator():
     """Test time freq with MNE inverse computation"""
 
@@ -35,12 +39,14 @@ def test_tfr_with_inverse_operator():
     raw = io.Raw(fname_data)
     events = find_events(raw, stim_channel='STI 014')
     inverse_operator = read_inverse_operator(fname_inv)
+    inv = prepare_inverse_operator(inverse_operator, nave=1,
+                                   lambda2=1. / 9., method="dSPM")
 
     raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
 
     # picks MEG gradiometers
     picks = pick_types(raw.info, meg=True, eeg=False, eog=True,
-                            stim=False, exclude='bads')
+                       stim=False, exclude='bads')
 
     # Load condition 1
     event_id = 1
@@ -53,18 +59,19 @@ def test_tfr_with_inverse_operator():
     bands = dict(alpha=[10, 10])
     label = read_label(fname_label)
 
-    stcs = source_band_induced_power(epochs, inverse_operator, bands,
+    stcs = source_band_induced_power(epochs, inv, bands,
                                      n_cycles=2, use_fft=False, pca=True,
-                                     label=label)
+                                     label=label, prepared=True)
 
     stc = stcs['alpha']
     assert_true(len(stcs) == len(list(bands.keys())))
     assert_true(np.all(stc.data > 0))
     assert_array_almost_equal(stc.times, epochs.times)
 
-    stcs_no_pca = source_band_induced_power(epochs, inverse_operator, bands,
+    stcs_no_pca = source_band_induced_power(epochs, inv, bands,
                                             n_cycles=2, use_fft=False,
-                                            pca=False, label=label)
+                                            pca=False, label=label,
+                                            prepared=True)
 
     assert_array_almost_equal(stcs['alpha'].data, stcs_no_pca['alpha'].data)
 
@@ -74,15 +81,18 @@ def test_tfr_with_inverse_operator():
                     preload=True)
 
     frequencies = np.arange(7, 30, 2)  # define frequencies of interest
-    power, phase_lock = source_induced_power(epochs, inverse_operator,
-                            frequencies, label, baseline=(-0.1, 0),
-                            baseline_mode='percent', n_cycles=2, n_jobs=1)
+    power, phase_lock = source_induced_power(epochs, inv,
+                                             frequencies, label,
+                                             baseline=(-0.1, 0),
+                                             baseline_mode='percent',
+                                             n_cycles=2, n_jobs=1,
+                                             prepared=True)
     assert_true(np.all(phase_lock > 0))
     assert_true(np.all(phase_lock <= 1))
     assert_true(np.max(power) > 10)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_source_psd():
     """Test source PSD computation in label"""
     raw = io.Raw(fname_data)
@@ -98,11 +108,11 @@ def test_source_psd():
     assert_true(stc.times[0] >= fmin * 1e-3)
     assert_true(stc.times[-1] <= fmax * 1e-3)
     # Time max at line frequency (60 Hz in US)
-    assert_true(59e-3 <= stc.times[np.argmax(np.sum(stc.data, axis=0))]
-                      <= 61e-3)
+    assert_true(59e-3 <= stc.times[np.argmax(np.sum(stc.data, axis=0))] <=
+                61e-3)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_source_psd_epochs():
     """Test multi-taper source PSD computation in label from epochs"""
 
@@ -116,8 +126,8 @@ def test_source_psd_epochs():
     fmin, fmax = 0, 100
 
     picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
-                            ecg=True, eog=True, include=['STI 014'],
-                            exclude='bads')
+                       ecg=True, eog=True, include=['STI 014'],
+                       exclude='bads')
     reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
 
     events = find_events(raw, stim_channel='STI 014')
@@ -128,20 +138,24 @@ def test_source_psd_epochs():
     epochs.drop_bad_epochs()
     one_epochs = epochs[:1]
 
+    inv = prepare_inverse_operator(inverse_operator, nave=1,
+                                   lambda2=1. / 9., method="dSPM")
     # return list
-    stc_psd = compute_source_psd_epochs(one_epochs, inverse_operator,
+    stc_psd = compute_source_psd_epochs(one_epochs, inv,
                                         lambda2=lambda2, method=method,
                                         pick_ori="normal", label=label,
                                         bandwidth=bandwidth,
-                                        fmin=fmin, fmax=fmax)[0]
+                                        fmin=fmin, fmax=fmax,
+                                        prepared=True)[0]
 
     # return generator
-    stcs = compute_source_psd_epochs(one_epochs, inverse_operator,
+    stcs = compute_source_psd_epochs(one_epochs, inv,
                                      lambda2=lambda2, method=method,
                                      pick_ori="normal", label=label,
                                      bandwidth=bandwidth,
                                      fmin=fmin, fmax=fmax,
-                                     return_generator=True)
+                                     return_generator=True,
+                                     prepared=True)
 
     for stc in stcs:
         stc_psd_gen = stc
@@ -149,9 +163,10 @@ def test_source_psd_epochs():
     assert_array_almost_equal(stc_psd.data, stc_psd_gen.data)
 
     # compare with direct computation
-    stc = apply_inverse_epochs(one_epochs, inverse_operator,
+    stc = apply_inverse_epochs(one_epochs, inv,
                                lambda2=lambda2, method=method,
-                               pick_ori="normal", label=label)[0]
+                               pick_ori="normal", label=label,
+                               prepared=True)[0]
 
     sfreq = epochs.info['sfreq']
     psd, freqs = multitaper_psd(stc.data, sfreq=sfreq, bandwidth=bandwidth,
@@ -159,3 +174,27 @@ def test_source_psd_epochs():
 
     assert_array_almost_equal(psd, stc_psd.data)
     assert_array_almost_equal(freqs, stc_psd.times)
+
+    # Check corner cases caused by tiny bandwidth
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        compute_source_psd_epochs(one_epochs, inv,
+                                  lambda2=lambda2, method=method,
+                                  pick_ori="normal", label=label,
+                                  bandwidth=0.01, low_bias=True,
+                                  fmin=fmin, fmax=fmax,
+                                  return_generator=False,
+                                  prepared=True)
+        compute_source_psd_epochs(one_epochs, inv,
+                                  lambda2=lambda2, method=method,
+                                  pick_ori="normal", label=label,
+                                  bandwidth=0.01, low_bias=False,
+                                  fmin=fmin, fmax=fmax,
+                                  return_generator=False,
+                                  prepared=True)
+    assert_true(len(w) >= 2)
+    assert_true(any('not properly use' in str(ww.message) for ww in w))
+    assert_true(any('Bandwidth too small' in str(ww.message) for ww in w))
+
+
+run_tests_if_main()
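
The prepare-once pattern extends to the time-frequency functions: with
prepared=True, source_band_induced_power and friends skip their internal
prepare_inverse_operator call. A condensed sketch of this test's setup
(testing-dataset paths as above; the epoch parameters are illustrative):

    import os.path as op

    from mne import Epochs, find_events, io, pick_types, read_label
    from mne.datasets import testing
    from mne.minimum_norm import (prepare_inverse_operator,
                                  read_inverse_operator,
                                  source_band_induced_power)

    s_path = op.join(testing.data_path(), 'MEG', 'sample')
    raw = io.Raw(op.join(s_path, 'sample_audvis_trunc_raw.fif'))
    events = find_events(raw, stim_channel='STI 014')
    picks = pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False,
                       exclude='bads')
    epochs = Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
                    picks=picks, baseline=(None, 0))
    label = read_label(op.join(s_path, 'labels', 'Aud-lh.label'))

    inv = read_inverse_operator(
        op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif'))
    # Prepare once, then tell the TFR code not to prepare again.
    inv = prepare_inverse_operator(inv, nave=1, lambda2=1. / 9.,
                                   method='dSPM')
    stcs = source_band_induced_power(epochs, inv, dict(alpha=[10, 10]),
                                     n_cycles=2, use_fft=False, label=label,
                                     prepared=True)
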
diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py
index b7575cd..81e037b 100644
--- a/mne/minimum_norm/time_frequency.py
+++ b/mne/minimum_norm/time_frequency.py
@@ -6,7 +6,8 @@
 from warnings import warn
 
 import numpy as np
-from scipy import linalg, signal, fftpack
+from scipy import linalg, fftpack
+import warnings
 
 from ..io.constants import FIFF
 from ..source_estimate import _make_stc
@@ -22,12 +23,50 @@ from ..utils import logger, verbose
 from ..externals import six
 
 
+def _prepare_source_params(inst, inverse_operator, label=None,
+                           lambda2=1.0 / 9.0, method="dSPM", nave=1,
+                           decim=1, pca=True, pick_ori="normal",
+                           prepared=False, verbose=None):
+    """Prepare inverse operator and params for spectral / TFR analysis"""
+    if not prepared:
+        inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
+    else:
+        inv = inverse_operator
+    #
+    #   Pick the correct channels from the data
+    #
+    sel = _pick_channels_inverse_operator(inst.ch_names, inv)
+    logger.info('Picked %d channels from the data' % len(sel))
+    logger.info('Computing inverse...')
+    #
+    #   Simple matrix multiplication followed by combination of the
+    #   three current components
+    #
+    #   This does all the data transformations to compute the weights for the
+    #   eigenleads
+    #
+    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
+
+    if pca:
+        U, s, Vh = linalg.svd(K, full_matrices=False)
+        rank = np.sum(s > 1e-8 * s[0])
+        K = s[:rank] * U[:, :rank]
+        Vh = Vh[:rank]
+        logger.info('Reducing data rank to %d' % rank)
+    else:
+        Vh = None
+    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
+
+    return K, sel, Vh, vertno, is_free_ori, noise_norm
+
+
 @verbose
 def source_band_induced_power(epochs, inverse_operator, bands, label=None,
                               lambda2=1.0 / 9.0, method="dSPM", nave=1,
                               n_cycles=5, df=1, use_fft=False, decim=1,
                               baseline=None, baseline_mode='logratio',
-                              pca=True, n_jobs=1, verbose=None):
+                              pca=True, n_jobs=1, prepared=False,
+                              verbose=None):
     """Compute source space induced power in given frequency bands
 
     Parameters
@@ -50,10 +89,10 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
         Number of cycles. Fixed number or one per frequency.
     df : float
         delta frequency within bands.
-    decim : int
-        Temporal decimation factor.
     use_fft : bool
         Do convolutions in time or frequency domain with FFT.
+    decim : int
+        Temporal decimation factor.
     baseline : None (default) or tuple of length 2
         The time interval to apply baseline correction.
         If None do not apply it. If baseline is (a, b)
@@ -73,6 +112,8 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
         e.g. with a dataset that was maxfiltered (true dim is 64).
     n_jobs : int
         Number of jobs to run in parallel.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -86,13 +127,11 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
     frequencies = np.concatenate([np.arange(band[0], band[1] + df / 2.0, df)
                                  for _, band in six.iteritems(bands)])
 
-    powers, _, vertno = _source_induced_power(epochs,
-                                      inverse_operator, frequencies,
-                                      label=label,
-                                      lambda2=lambda2, method=method,
-                                      nave=nave, n_cycles=n_cycles,
-                                      decim=decim, use_fft=use_fft, pca=pca,
-                                      n_jobs=n_jobs, with_plv=False)
+    powers, _, vertno = _source_induced_power(
+        epochs, inverse_operator, frequencies, label=label, lambda2=lambda2,
+        method=method, nave=nave, n_cycles=n_cycles, decim=decim,
+        use_fft=use_fft, pca=pca, n_jobs=n_jobs, with_plv=False,
+        prepared=prepared)
 
     Fs = epochs.info['sfreq']  # sampling in Hz
     stcs = dict()
@@ -119,71 +158,94 @@ def source_band_induced_power(epochs, inverse_operator, bands, label=None,
     return stcs
 
 
- at verbose
-def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, with_plv,
-                     pick_ori, decim, verbose=None):
-    """Aux function for source_induced_power"""
+def _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori):
+    """Aux function to prepare TFR source localization"""
     n_times = data[:, :, ::decim].shape[2]
     n_freqs = len(Ws)
     n_sources = K.shape[0]
     is_free_ori = False
-    if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and pick_ori == None):
+    if (source_ori == FIFF.FIFFV_MNE_FREE_ORI and pick_ori is None):
         is_free_ori = True
         n_sources //= 3
 
     shape = (n_sources, n_freqs, n_times)
-    power = np.zeros(shape, dtype=np.float)  # power
-    if with_plv:
-        shape = (n_sources, n_freqs, n_times)
-        plv = np.zeros(shape, dtype=np.complex)  # phase lock
-    else:
-        plv = None
+    return shape, is_free_ori
 
-    for e in data:
-        e = e[sel]  # keep only selected channels
+
+ at verbose
+def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh,
+                     with_power, with_plv, pick_ori, decim, verbose=None):
+    """Aux function for induced power and PLV"""
+    shape, is_free_ori = _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori)
+    n_sources, _, n_times = shape
+    power = np.zeros(shape, dtype=np.float)  # power or raw TFR
+    # phase lock
+    plv = np.zeros(shape, dtype=np.complex) if with_plv else None
+
+    for epoch in data:
+        epoch = epoch[sel]  # keep only selected channels
 
         if Vh is not None:
-            e = np.dot(Vh, e)  # reducing data rank
+            epoch = np.dot(Vh, epoch)  # reducing data rank
 
-        for f, w in enumerate(Ws):
-            tfr = cwt(e, [w], use_fft=use_fft, decim=decim)
-            tfr = np.asfortranarray(tfr.reshape(len(e), -1))
+        power_e, plv_e = _single_epoch_tfr(
+            data=epoch, is_free_ori=is_free_ori, K=K, Ws=Ws, use_fft=use_fft,
+            decim=decim, shape=shape, with_plv=with_plv, with_power=with_power)
+
+        power += power_e
+        if with_plv:
+            plv += plv_e
+
+    return power, plv
 
-            # phase lock and power at freq f
-            if with_plv:
-                plv_f = np.zeros((n_sources, n_times), dtype=np.complex)
-            pow_f = np.zeros((n_sources, n_times), dtype=np.float)
 
-            for k, t in enumerate([np.real(tfr), np.imag(tfr)]):
-                sol = np.dot(K, t)
+def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape,
+                      with_plv, with_power):
+    """Compute single trial TFRs, either ITC, power or raw TFR"""
+    tfr_e = np.zeros(shape, dtype=np.float)  # power or raw TFR
+    # phase lock
+    plv_e = np.zeros(shape, dtype=np.complex) if with_plv else None
+    n_sources, _, n_times = shape
+    for f, w in enumerate(Ws):
+        tfr_ = cwt(data, [w], use_fft=use_fft, decim=decim)
+        tfr_ = np.asfortranarray(tfr_.reshape(len(data), -1))
 
-                sol_pick_normal = sol
-                if is_free_ori:
-                    sol_pick_normal = sol[2::3]
+        # phase lock and power at freq f
+        if with_plv:
+            plv_f = np.zeros((n_sources, n_times), dtype=np.complex)
 
-                if with_plv:
-                    if k == 0:  # real
-                        plv_f += sol_pick_normal
-                    else:  # imag
-                        plv_f += 1j * sol_pick_normal
+        tfr_f = np.zeros((n_sources, n_times), dtype=np.float)
 
-                if is_free_ori:
-                    logger.debug('combining the current components...')
-                    sol = combine_xyz(sol, square=True)
-                else:
-                    np.power(sol, 2, sol)
-                pow_f += sol
-                del sol
+        for k, t in enumerate([np.real(tfr_), np.imag(tfr_)]):
+            sol = np.dot(K, t)
 
-            power[:, f, :] += pow_f
-            del pow_f
+            sol_pick_normal = sol
+            if is_free_ori:
+                sol_pick_normal = sol[2::3]
 
             if with_plv:
-                plv_f /= np.abs(plv_f)
-                plv[:, f, :] += plv_f
-                del plv_f
+                if k == 0:  # real
+                    plv_f += sol_pick_normal
+                else:  # imag
+                    plv_f += 1j * sol_pick_normal
 
-    return power, plv
+            if is_free_ori:
+                logger.debug('combining the current components...')
+                sol = combine_xyz(sol, square=with_power)
+            elif with_power:
+                sol *= sol
+            tfr_f += sol
+            del sol
+
+        tfr_e[:, f, :] += tfr_f
+        del tfr_f
+
+        if with_plv:
+            plv_f /= np.abs(plv_f)
+            plv_e[:, f, :] += plv_f
+            del plv_f
+
+    return tfr_e, plv_e
 
 
 @verbose
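
For reference, the phase-locking quantity assembled by _compute_pow_plv and
_single_epoch_tfr is, per source and frequency, an average of unit-modulus
phase vectors over epochs, so its magnitude always lies in [0, 1]. A toy
numpy version of just that reduction, independent of the MNE code paths
above:

    import numpy as np

    rng = np.random.RandomState(0)
    n_epochs, n_times = 20, 100
    phases = rng.uniform(-np.pi, np.pi, (n_epochs, n_times))

    # One unit-modulus phase vector per epoch; the magnitude of their mean
    # is 1 for perfect phase locking and near 0 for random phase.
    z = np.exp(1j * phases)
    plv = np.abs(z.mean(axis=0))
    assert np.all((plv >= 0) & (plv <= 1))
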
@@ -191,41 +253,17 @@ def _source_induced_power(epochs, inverse_operator, frequencies, label=None,
                           lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
                           decim=1, use_fft=False, pca=True, pick_ori="normal",
                           n_jobs=1, with_plv=True, zero_mean=False,
-                          verbose=None):
-    """Aux function for source_induced_power
-    """
-    parallel, my_compute_pow_plv, n_jobs = parallel_func(_compute_pow_plv,
-                                                         n_jobs)
-    #
-    #   Set up the inverse according to the parameters
-    #
+                          prepared=False, verbose=None):
+    """Aux function for source induced power"""
     epochs_data = epochs.get_data()
-
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
-    #
-    #   Pick the correct channels from the data
-    #
-    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
-    logger.info('Picked %d channels from the data' % len(sel))
-    logger.info('Computing inverse...')
-    #
-    #   Simple matrix multiplication followed by combination of the
-    #   three current components
-    #
-    #   This does all the data transformations to compute the weights for the
-    #   eigenleads
-    #
-    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
-
-    if pca:
-        U, s, Vh = linalg.svd(K, full_matrices=False)
-        rank = np.sum(s > 1e-8 * s[0])
-        K = s[:rank] * U[:, :rank]
-        Vh = Vh[:rank]
-        logger.info('Reducing data rank to %d' % rank)
-    else:
-        Vh = None
-
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=epochs, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
+
+    inv = inverse_operator
+    parallel, my_compute_source_tfrs, n_jobs = parallel_func(
+        _compute_pow_plv, n_jobs)
     Fs = epochs.info['sfreq']  # sampling in Hz
 
     logger.info('Computing source power ...')
@@ -233,10 +271,12 @@ def _source_induced_power(epochs, inverse_operator, frequencies, label=None,
     Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
 
     n_jobs = min(n_jobs, len(epochs_data))
-    out = parallel(my_compute_pow_plv(data, K, sel, Ws,
-                                      inv['source_ori'], use_fft, Vh,
-                                      with_plv, pick_ori, decim)
-                        for data in np.array_split(epochs_data, n_jobs))
+    out = parallel(my_compute_source_tfrs(data=data, K=K, sel=sel, Ws=Ws,
+                                          source_ori=inv['source_ori'],
+                                          use_fft=use_fft, Vh=Vh,
+                                          with_plv=with_plv, with_power=True,
+                                          pick_ori=pick_ori, decim=decim)
+                   for data in np.array_split(epochs_data, n_jobs))
     power = sum(o[0] for o in out)
     power /= len(epochs_data)  # average power over epochs
 
@@ -258,8 +298,8 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
                          lambda2=1.0 / 9.0, method="dSPM", nave=1, n_cycles=5,
                          decim=1, use_fft=False, pick_ori=None,
                          baseline=None, baseline_mode='logratio', pca=True,
-                         n_jobs=1, zero_mean=False, verbose=None,
-                         pick_normal=None):
+                         n_jobs=1, zero_mean=False, prepared=False,
+                         verbose=None):
     """Compute induced power and phase lock
 
     Computation can optionally be restricted to a label.
@@ -270,10 +310,10 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
         The epochs.
     inverse_operator : instance of InverseOperator
         The inverse operator.
-    label : Label
-        Restricts the source estimates to a given label.
     frequencies : array
         Array of frequencies of interest.
+    label : Label
+        Restricts the source estimates to a given label.
     lambda2 : float
         The regularization parameter of the minimum norm.
     method : "MNE" | "dSPM" | "sLORETA"
@@ -311,18 +351,23 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
         Number of jobs to run in parallel.
     zero_mean : bool
         Make sure the wavelets are zero mean.
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
     method = _check_method(method)
-    pick_ori = _check_ori(pick_ori, pick_normal)
+    pick_ori = _check_ori(pick_ori)
 
     power, plv, vertno = _source_induced_power(epochs,
-                            inverse_operator, frequencies,
-                            label=label, lambda2=lambda2, method=method,
-                            nave=nave, n_cycles=n_cycles, decim=decim,
-                            use_fft=use_fft, pick_ori=pick_ori,
-                            pca=pca, n_jobs=n_jobs)
+                                               inverse_operator, frequencies,
+                                               label=label, lambda2=lambda2,
+                                               method=method, nave=nave,
+                                               n_cycles=n_cycles, decim=decim,
+                                               use_fft=use_fft,
+                                               pick_ori=pick_ori,
+                                               pca=pca, n_jobs=n_jobs,
+                                               zero_mean=zero_mean,
+                                               prepared=prepared)
 
     # Run baseline correction
     if baseline is not None:
@@ -336,8 +381,7 @@ def source_induced_power(epochs, inverse_operator, frequencies, label=None,
 def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                        tmin=None, tmax=None, fmin=0., fmax=200.,
                        n_fft=2048, overlap=0.5, pick_ori=None, label=None,
-                       nave=1, pca=True, verbose=None, pick_normal=None,
-                       NFFT=None):
+                       nave=1, pca=True, prepared=False, verbose=None):
     """Compute source power spectrum density (PSD)
 
     Parameters
@@ -376,7 +420,9 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
     pca : bool
         If True, the true dimension of data is estimated before running
         the time frequency transforms. It reduces the computation times
-        e.g. with a dataset that was maxfiltered (true dim is 64)
+        e.g. with a dataset that was maxfiltered (true dim is 64).
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -385,41 +431,15 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
     stc : SourceEstimate | VolSourceEstimate
         The PSD (in dB) of each of the sources.
     """
-    if NFFT is not None:
-        n_fft = NFFT
-        warnings.warn("`NFFT` is deprecated and will be removed in v0.9. "
-                      "Use `n_fft` instead")
-
-    pick_ori = _check_ori(pick_ori, pick_normal)
+    from scipy.signal import hanning
+    pick_ori = _check_ori(pick_ori)
 
     logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
 
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
-    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-
-    #
-    #   Pick the correct channels from the data
-    #
-    sel = _pick_channels_inverse_operator(raw.ch_names, inv)
-    logger.info('Picked %d channels from the data' % len(sel))
-    logger.info('Computing inverse...')
-    #
-    #   Simple matrix multiplication followed by combination of the
-    #   three current components
-    #
-    #   This does all the data transformations to compute the weights for the
-    #   eigenleads
-    #
-    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
-
-    if pca:
-        U, s, Vh = linalg.svd(K, full_matrices=False)
-        rank = np.sum(s > 1e-8 * s[0])
-        K = s[:rank] * U[:, :rank]
-        Vh = Vh[:rank]
-        logger.info('Reducing data rank to %d' % rank)
-    else:
-        Vh = None
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=raw, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
 
     start, stop = 0, raw.last_samp + 1 - raw.first_samp
     if tmin is not None:
@@ -428,7 +448,7 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
         stop = raw.time_as_index(tmax)[0] + 1
     n_fft = int(n_fft)
     Fs = raw.info['sfreq']
-    window = signal.hanning(n_fft)
+    window = hanning(n_fft)
     freqs = fftpack.fftfreq(n_fft, 1. / Fs)
     freqs_mask = (freqs >= 0) & (freqs >= fmin) & (freqs <= fmax)
     freqs = freqs[freqs_mask]
@@ -450,10 +470,10 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
         data_fft = fftpack.fft(data)[:, freqs_mask]
         sol = np.dot(K, data_fft)
 
-        if is_free_ori and pick_ori == None:
+        if is_free_ori and pick_ori is None:
             sol = combine_xyz(sol, square=True)
         else:
-            sol = np.abs(sol) ** 2
+            sol = (sol * sol.conj()).real
 
         if method != "MNE":
             sol *= noise_norm ** 2
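
Replacing np.abs(sol) ** 2 with (sol * sol.conj()).real is an algebraic
identity, |z|**2 == z * conj(z), that skips the square root hidden inside
abs. A quick illustrative check:

    import numpy as np

    rng = np.random.RandomState(42)
    z = rng.randn(5) + 1j * rng.randn(5)
    assert np.allclose(np.abs(z) ** 2, (z * z.conj()).real)
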
@@ -473,41 +493,19 @@ def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
 
 @verbose
 def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
-                              method="dSPM", fmin=0., fmax=200.,
-                              pick_ori=None, label=None, nave=1,
-                              pca=True, inv_split=None, bandwidth=4.,
-                              adaptive=False, low_bias=True, n_jobs=1,
-                              verbose=None):
+                               method="dSPM", fmin=0., fmax=200.,
+                               pick_ori=None, label=None, nave=1,
+                               pca=True, inv_split=None, bandwidth=4.,
+                               adaptive=False, low_bias=True, n_jobs=1,
+                               prepared=False, verbose=None):
     """ Generator for compute_source_psd_epochs """
 
     logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))
 
-    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
-    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
-
-    #
-    #   Pick the correct channels from the data
-    #
-    sel = _pick_channels_inverse_operator(epochs.ch_names, inv)
-    logger.info('Picked %d channels from the data' % len(sel))
-    logger.info('Computing inverse...')
-    #
-    #   Simple matrix multiplication followed by combination of the
-    #   three current components
-    #
-    #   This does all the data transformations to compute the weights for the
-    #   eigenleads
-    #
-    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)
-
-    if pca:
-        U, s, Vh = linalg.svd(K, full_matrices=False)
-        rank = np.sum(s > 1e-8 * s[0])
-        K = s[:rank] * U[:, :rank]
-        Vh = Vh[:rank]
-        logger.info('Reducing data rank to %d' % rank)
-    else:
-        Vh = None
+    K, sel, Vh, vertno, is_free_ori, noise_norm = _prepare_source_params(
+        inst=epochs, inverse_operator=inverse_operator, label=label,
+        lambda2=lambda2, method=method, nave=nave, pca=pca, pick_ori=pick_ori,
+        prepared=prepared, verbose=verbose)
 
     # split the inverse operator
     if inv_split is not None:
@@ -521,6 +519,9 @@ def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
 
     # compute standardized half-bandwidth
     half_nbw = float(bandwidth) * n_times / (2 * sfreq)
+    if half_nbw < 0.5:
+        warnings.warn('Bandwidth too small, using minimum (normalized 0.5)')
+        half_nbw = 0.5
     n_tapers_max = int(2 * half_nbw)
 
     dpss, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
@@ -575,8 +576,9 @@ def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
             # compute the psd
             if adaptive:
                 out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
-                       for x in np.array_split(x_mt_src,
-                                               min(n_jobs, len(x_mt_src))))
+                               for x in np.array_split(x_mt_src,
+                                                       min(n_jobs,
+                                                           len(x_mt_src))))
                 this_psd = np.concatenate(out)
             else:
                 x_mt_src = x_mt_src[:, :, freq_mask]
@@ -586,7 +588,7 @@ def _compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
             pos += K_part.shape[0]
 
         # combine orientations
-        if is_free_ori and pick_ori == None:
+        if is_free_ori and pick_ori is None:
             psd = combine_xyz(psd, square=False)
 
         if method != "MNE":
@@ -606,7 +608,7 @@ def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
                               pca=True, inv_split=None, bandwidth=4.,
                               adaptive=False, low_bias=True,
                               return_generator=False, n_jobs=1,
-                              verbose=None, pick_normal=None):
+                              prepared=False, verbose=None):
     """Compute source power spectrum density (PSD) from Epochs using
        multi-taper method
 
@@ -651,6 +653,8 @@ def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
         over the stcs without having to keep them all in memory.
     n_jobs : int
         Number of parallel jobs to use (only used if adaptive=True).
+    prepared : bool
+        If True, do not call `prepare_inverse_operator`.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -662,11 +666,15 @@ def compute_source_psd_epochs(epochs, inverse_operator, lambda2=1. / 9.,
 
     # use an auxiliary function so we can either return a generator or a list
     stcs_gen = _compute_source_psd_epochs(epochs, inverse_operator,
-                              lambda2=lambda2, method=method, fmin=fmin,
-                              fmax=fmax, pick_ori=pick_ori, label=label,
-                              nave=nave, pca=pca, inv_split=inv_split,
-                              bandwidth=bandwidth, adaptive=adaptive,
-                              low_bias=low_bias, n_jobs=n_jobs)
+                                          lambda2=lambda2, method=method,
+                                          fmin=fmin, fmax=fmax,
+                                          pick_ori=pick_ori, label=label,
+                                          nave=nave, pca=pca,
+                                          inv_split=inv_split,
+                                          bandwidth=bandwidth,
+                                          adaptive=adaptive,
+                                          low_bias=low_bias, n_jobs=n_jobs,
+                                          prepared=prepared)
 
     if return_generator:
         # return generator object
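
The standardized half-bandwidth in the generator above is
half_nbw = bandwidth * n_times / (2 * sfreq), and the taper count follows as
int(2 * half_nbw). A worked example with assumed values (1 s of data at
600 Hz, the default 4 Hz bandwidth):

    sfreq, n_times, bandwidth = 600., 600, 4.
    half_nbw = float(bandwidth) * n_times / (2 * sfreq)  # 4 * 600 / 1200 = 2.0
    half_nbw = max(half_nbw, 0.5)     # clamp, cf. the warning added above
    n_tapers_max = int(2 * half_nbw)
    print(half_nbw, n_tapers_max)     # 2.0 4
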
diff --git a/mne/misc.py b/mne/misc.py
index 6367e36..ab5f1bd 100644
--- a/mne/misc.py
+++ b/mne/misc.py
@@ -16,9 +16,11 @@ def parse_config(fname):
     -------
     conditions : list of dict
         Each condition is indexed by the event type.
-        A condition contains as keys:
+        A condition contains the following keys::
+
             tmin, tmax, name, grad_reject, mag_reject,
             eeg_reject, eog_reject
+
     """
     reject_params = read_reject_parameters(fname)
 
@@ -58,7 +60,13 @@ def parse_config(fname):
 
 
 def read_reject_parameters(fname):
-    """Read rejection parameters from .cov or .ave config file"""
+    """Read rejection parameters from .cov or .ave config file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to read.
+    """
 
     try:
         with open(fname, 'r') as f:
@@ -74,7 +82,7 @@ def read_reject_parameters(fname):
         words = line.split()
         if words[0] in reject_names:
             reject[reject_pynames[reject_names.index(words[0])]] = \
-                                                                float(words[1])
+                float(words[1])
 
     return reject
 
@@ -95,6 +103,6 @@ def read_flat_parameters(fname):
         words = line.split()
         if words[0] in reject_names:
             flat[reject_pynames[reject_names.index(words[0])]] = \
-                                                                float(words[1])
+                float(words[1])
 
     return flat
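
Both readers above share one pattern: scan whitespace-separated config lines
and map known keys onto Python names. A minimal sketch of that pattern; the
key names here are illustrative, not the exact ones used in .cov/.ave files:

    # hypothetical config lines; real key names may differ
    lines = ["gradReject 2000e-13", "magReject 3e-12", "# comment"]
    reject_names = ['gradReject', 'magReject']
    reject_pynames = ['grad', 'mag']

    reject = {}
    for line in lines:
        words = line.split()
        if words and words[0] in reject_names:
            reject[reject_pynames[reject_names.index(words[0])]] = \
                float(words[1])
    # reject == {'grad': 2e-10, 'mag': 3e-12}
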
diff --git a/mne/mixed_norm/__init__.py b/mne/mixed_norm/__init__.py
deleted file mode 100644
index ec14c8a..0000000
--- a/mne/mixed_norm/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from ..utils import deprecated
-from ..inverse_sparse import mxne_inverse
-
-dec = deprecated('Use the function from mne.inverse_sparse')
-
-mixed_norm = dec(mxne_inverse.mixed_norm)
-tf_mixed_norm = dec(mxne_inverse.tf_mixed_norm)
diff --git a/mne/parallel.py b/mne/parallel.py
index 1a9885f..8c93acc 100644
--- a/mne/parallel.py
+++ b/mne/parallel.py
@@ -18,6 +18,7 @@ if 'MNE_FORCE_SERIAL' in os.environ:
 else:
     _force_serial = None
 
+
 @verbose
 def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
     """Return parallel instance with delayed function
@@ -33,7 +34,7 @@ def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         INFO or DEBUG will print parallel status, others will not.
-    max_nbytes int, str, or None
+    max_nbytes : int, str, or None
         Threshold on the minimum size of arrays passed to the workers that
         triggers automated memory mapping. Can be an int in bytes,
         or a human-readable string, e.g., '1M' for 1 megabyte.
@@ -69,8 +70,8 @@ def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
             return parallel, my_func, n_jobs
 
     # check if joblib is recent enough to support memmaping
-    aspec = inspect.getargspec(Parallel.__init__)
-    joblib_mmap = ('temp_folder' in aspec.args and 'max_nbytes' in aspec.args)
+    p_args = inspect.getargspec(Parallel.__init__).args
+    joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
 
     cache_dir = get_config('MNE_CACHE_DIR', None)
     if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':
@@ -118,17 +119,16 @@ def check_n_jobs(n_jobs, allow_cuda=False):
         The checked number of jobs. Always positive (or 'cuda' if
         applicable).
     """
-    if _force_serial:
-        n_jobs = 1
-        logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
-                    'serial mode.')
-
-    elif not isinstance(n_jobs, int):
+    if not isinstance(n_jobs, int):
         if not allow_cuda:
             raise ValueError('n_jobs must be an integer')
         elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':
             raise ValueError('n_jobs must be an integer, or "cuda"')
-        #else, we have n_jobs='cuda' and this is okay, so do nothing
+        # else, we have n_jobs='cuda' and this is okay, so do nothing
+    elif _force_serial:
+        n_jobs = 1
+        logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
+                    'serial mode.')
     elif n_jobs <= 0:
         try:
             import multiprocessing
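
The call sites refactored above all use the same idiom: parallel_func returns
a Parallel-like callable plus a delayed wrapper, and the caller splits its
data into n_jobs chunks. A hedged usage sketch of that idiom:

    import numpy as np
    from mne.parallel import parallel_func

    def _square_sum(x):
        return (x ** 2).sum()

    data = np.arange(12.).reshape(4, 3)
    parallel, p_fun, n_jobs = parallel_func(_square_sum, n_jobs=2)
    out = parallel(p_fun(d) for d in np.array_split(data, n_jobs))
    total = sum(out)  # same result as _square_sum(data)
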
diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py
index 920f922..e1f6420 100644
--- a/mne/preprocessing/__init__.py
+++ b/mne/preprocessing/__init__.py
@@ -11,6 +11,9 @@ from .maxfilter import apply_maxfilter
 from .ssp import compute_proj_ecg, compute_proj_eog
 from .eog import find_eog_events, create_eog_epochs
 from .ecg import find_ecg_events, create_ecg_epochs
-from .ica import (ICA, ica_find_eog_events, ica_find_ecg_events, score_funcs,
-                  read_ica, run_ica)
+from .ica import (ICA, ica_find_eog_events, ica_find_ecg_events,
+                  get_score_funcs, read_ica, run_ica)
 from .bads import find_outliers
+from .stim import fix_stim_artifact
+from .maxwell import _maxwell_filter
+from .xdawn import Xdawn
diff --git a/mne/preprocessing/bads.py b/mne/preprocessing/bads.py
index 9ea677c..c2f6827 100644
--- a/mne/preprocessing/bads.py
+++ b/mne/preprocessing/bads.py
@@ -3,11 +3,14 @@
 
 
 import numpy as np
-from scipy import stats
 
 
-def find_outliers(X, threshold=3.0):
-    """Find outliers based on Gaussian mixture
+def find_outliers(X, threshold=3.0, max_iter=2):
+    """Find outliers based on iterated Z-scoring
+
+    This procedure compares the absolute z-score against the threshold.
+    After excluding local outliers, the comparison is repeated until no
+    local outliers remain.
 
     Parameters
     ----------
@@ -15,18 +18,19 @@ def find_outliers(X, threshold=3.0):
         The scores for which to find outliers.
     threshold : float
         The value above which a feature is classified as an outlier.
+    max_iter : int
+        The maximum number of iterations.
 
     Returns
     -------
     bad_idx : np.ndarray of int, shape (n_features)
         The outlier indices.
     """
-    max_iter = 2
+    from scipy.stats import zscore
     my_mask = np.zeros(len(X), dtype=np.bool)
-    X = np.abs(X)
     for _ in range(max_iter):
         X = np.ma.masked_array(X, my_mask)
-        this_z = stats.zscore(X)
+        this_z = np.abs(zscore(X))
         local_bad = this_z > threshold
         my_mask = np.max([my_mask, local_bad], 0)
         if not np.any(local_bad):
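
A worked example of why the z-scoring is iterated: a single extreme value
inflates the standard deviation enough to hide a milder outlier, which only
crosses the threshold once the extreme one is masked out. The numbers below
are made up:

    import numpy as np
    from scipy.stats import zscore

    X = np.array([0.1, -0.2, 0.3, 0.0, -0.1, 0.2, -0.3, 0.1, 0.0,
                  -0.2, 0.2, 0.1, -0.1, 8.0, 100.0])
    my_mask = np.zeros(len(X), dtype=bool)
    for _ in range(2):  # max_iter
        Xm = np.ma.masked_array(X, my_mask)
        local_bad = np.abs(zscore(Xm)) > 3.0
        my_mask = np.max([my_mask, local_bad], 0)
        if not np.any(local_bad):
            break
    # pass 1 flags index 14 (100.0); pass 2 then flags index 13 (8.0)
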
diff --git a/mne/preprocessing/ctps_.py b/mne/preprocessing/ctps_.py
index 3699a72..606a9de 100644
--- a/mne/preprocessing/ctps_.py
+++ b/mne/preprocessing/ctps_.py
@@ -5,7 +5,6 @@
 import math
 
 import numpy as np
-from scipy.signal import hilbert
 
 
 def _compute_normalized_phase(data):
@@ -21,6 +20,7 @@ def _compute_normalized_phase(data):
     phase_angles : ndarray, shape (n_epochs, n_sources, n_times)
         The normalized phase angles.
     """
+    from scipy.signal import hilbert
     return (np.angle(hilbert(data)) + np.pi) / (2 * np.pi)
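
The returned values map instantaneous phase from (-pi, pi] onto the unit
interval, which is the form the CTPS machinery expects. A short
self-contained sketch, assuming a plain sinusoid as input:

    import numpy as np
    from scipy.signal import hilbert

    t = np.arange(0., 1., 1e-3)
    data = np.sin(2 * np.pi * 10. * t)  # 10 Hz sinusoid
    phase = (np.angle(hilbert(data)) + np.pi) / (2 * np.pi)
    print(phase.min(), phase.max())     # both within [0, 1]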
 
 
diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py
index 2293e5d..1976318 100644
--- a/mne/preprocessing/ecg.py
+++ b/mne/preprocessing/ecg.py
@@ -1,3 +1,10 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import warnings
 from ..externals.six import string_types
 import numpy as np
 
@@ -82,8 +89,8 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
             if window[0] > thresh1:
                 max_time = np.argmax(window)
                 time.append(ii + max_time)
-                nx = np.sum(np.diff(((window > thresh1).astype(np.int)
-                                     == 1).astype(int)))
+                nx = np.sum(np.diff(((window > thresh1).astype(np.int) ==
+                                     1).astype(int)))
                 numcross.append(nx)
                 rms.append(np.sqrt(sum_squared(window) / window.size))
                 ii += win_size
@@ -105,8 +112,8 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
         clean_events.append(ce)
 
     # pick the best threshold; first get effective heart rates
-    rates = np.array([60. * len(ce) / (len(ecg) / float(sfreq))
-                      for ce in clean_events])
+    rates = np.array([60. * len(cev) / (len(ecg) / float(sfreq))
+                      for cev in clean_events])
 
     # now find heart rates that seem reasonable (infant thru adult athlete)
     idx = np.where(np.logical_and(rates <= 160., rates >= 40.))[0]
@@ -131,10 +138,11 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
         The raw data
     event_id : int
         The index to assign to found events
-    ch_name : str
+    ch_name : None | str
         The name of the channel to use for ECG peak detection.
-        The argument is mandatory if the dataset contains no ECG
-        channels.
+        If None (default), a synthetic ECG channel is created from
+        the cross-channel average. A synthetic channel can only be
+        created from 'meg' channels.
     tstart : float
         Start detection after tstart seconds. Useful when the beginning
         of the run is noisy.
@@ -160,16 +168,13 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
     average_pulse : float
         Estimated average pulse.
     """
-    try:
-        idx_ecg = _get_ecg_channel_index(ch_name, raw)
-        assert len(idx_ecg) == 1
-        logger.info('Using channel %s to identify heart beats'
-                    % raw.ch_names[idx_ecg[0]])
-
+    idx_ecg = _get_ecg_channel_index(ch_name, raw)
+    if idx_ecg is not None:
+        logger.info('Using channel %s to identify heart beats.'
+                    % raw.ch_names[idx_ecg])
         ecg, times = raw[idx_ecg, :]
-    except RuntimeError:
+    else:
         ecg, times = _make_ecg(raw, None, None, verbose)
-        idx_ecg = None
 
     # detecting QRS and generating event file
     ecg_events = qrs_detector(raw.info['sfreq'], ecg.ravel(), tstart=tstart,
@@ -181,34 +186,40 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
     logger.info("Number of ECG events detected : %d (average pulse %d / "
                 "min.)" % (n_events, average_pulse))
 
-    ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
-                       event_id * np.ones(n_events)]
+    ecg_events = np.array([ecg_events + raw.first_samp,
+                           np.zeros(n_events, int),
+                           event_id * np.ones(n_events, int)]).T
     return ecg_events, idx_ecg, average_pulse
 
 
 def _get_ecg_channel_index(ch_name, inst):
-     # Geting ECG Channel
+    """Geting ECG channel index. If no channel found returns None."""
     if ch_name is None:
         ecg_idx = pick_types(inst.info, meg=False, eeg=False, stim=False,
                              eog=False, ecg=True, emg=False, ref_meg=False,
                              exclude='bads')
     else:
-        ecg_idx = pick_channels(inst.ch_names, include=[ch_name])
-        if len(ecg_idx) == 0:
+        if ch_name not in inst.ch_names:
             raise ValueError('%s not in channel list (%s)' %
                              (ch_name, inst.ch_names))
+        ecg_idx = pick_channels(inst.ch_names, include=[ch_name])
+
+    if len(ecg_idx) == 0:
+        return None
+        # raise RuntimeError('No ECG channel found. Please specify ch_name '
+        #                    'parameter e.g. MEG 1531')
 
-    if len(ecg_idx) == 0 and ch_name is None:
-        raise RuntimeError('No ECG channel found. Please specify ch_name '
-                           'parameter e.g. MEG 1531')
+    if len(ecg_idx) > 1:
+        warnings.warn('More than one ECG channel found. Using only %s.'
+                      % inst.ch_names[ecg_idx[0]])
 
-    return ecg_idx
+    return ecg_idx[0]
 
 
 @verbose
 def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
                       tmin=-0.5, tmax=0.5, l_freq=8, h_freq=16, reject=None,
-                      flat=None, verbose=None, baseline=None):
+                      flat=None, baseline=None, preload=True, verbose=None):
     """Conveniently generate epochs around ECG artifact events
 
 
@@ -216,10 +227,11 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
     ----------
     raw : instance of Raw
         The raw data
-    ch_name : str
+    ch_name : None | str
         The name of the channel to use for ECG peak detection.
-        The argument is mandatory if the dataset contains no ECG
-        channels.
+        If None (default), a synthetic ECG channel is created from
+        the cross-channel average. A synthetic channel can only be
+        created from 'meg' channels.
     event_id : int
         The index to assign to found events
     picks : array-like of int | None (default)
@@ -233,14 +245,20 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
     h_freq : float
         High pass frequency.
     reject : dict | None
-        Rejection parameters based on peak to peak amplitude.
+        Rejection parameters based on peak-to-peak amplitude.
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-        If reject is None then no rejection is done. You should
-        use such parameters to reject big measurement artifacts
-        and not ECG for example
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # V (EEG channels)
+                          eog=250e-6 # V (EOG channels)
+                          )
+
     flat : dict | None
-        Rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
         If flat is None then no rejection is done.
     baseline : tuple or list of length 2, or None
         The time interval to apply rescaling / baseline correction.
@@ -250,26 +268,28 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
         and if b is None then b is set to the end of the interval.
         If baseline is equal to (None, None), the entire time
         interval is used. If None, no correction is applied.
+    preload : bool
+        Preload epochs or not.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
-    eog_epochs : instance of Epochs
-        Data epoched around EOG events.
+    ecg_epochs : instance of Epochs
+        Data epoched around ECG r-peaks.
     """
 
     events, _, _ = find_ecg_events(raw, ch_name=ch_name, event_id=event_id,
                                    l_freq=l_freq, h_freq=h_freq,
                                    verbose=verbose)
-    if picks is not None:
+    if picks is None:
         picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=False)
 
     # create epochs around ECG events and baseline (important)
     ecg_epochs = Epochs(raw, events=events, event_id=event_id,
                         tmin=tmin, tmax=tmax, proj=False,
                         picks=picks, reject=reject, baseline=baseline,
-                        verbose=verbose, preload=True)
+                        verbose=verbose, preload=preload)
     return ecg_epochs
 
 
@@ -277,7 +297,7 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None,
 def _make_ecg(inst, start, stop, verbose=None):
     """Create ECG signal from cross channel average
     """
-    if not any([c in inst for c in ['mag', 'grad']]):
+    if not any(c in inst for c in ['mag', 'grad']):
         raise ValueError('Unable to generate artificial ECG channel')
     for ch in ['mag', 'grad']:
         if ch in inst:
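
The events array assembled in find_ecg_events follows the usual MNE
convention of one row per event, [sample, 0, event_id]. A sketch of the same
construction with made-up R-peak samples:

    import numpy as np

    peaks = np.array([120, 430, 755])  # hypothetical R-peak samples
    first_samp, event_id = 0, 999
    n_events = len(peaks)
    ecg_events = np.array([peaks + first_samp,
                           np.zeros(n_events, int),
                           event_id * np.ones(n_events, int)]).T
    # ecg_events[0] == [120, 0, 999]
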
diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py
index b4b29e7..ece895c 100644
--- a/mne/preprocessing/eog.py
+++ b/mne/preprocessing/eog.py
@@ -1,3 +1,9 @@
+# Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Denis Engemann <denis.engemann at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
 import numpy as np
 
 from .peak_finder import peak_finder
@@ -19,10 +25,10 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
         The raw data.
     event_id : int
         The index to assign to found events.
-    low_pass : float
-        Low pass frequency.
-    high_pass : float
-        High pass frequency.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
     filter_length : str | int | None
         Number of taps to use for filtering.
     ch_name : str | None
@@ -88,8 +94,9 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
     eog_events += n_samples_start
     n_events = len(eog_events)
     logger.info("Number of EOG events detected : %d" % n_events)
-    eog_events = np.c_[eog_events + first_samp, np.zeros(n_events),
-                       event_id * np.ones(n_events)]
+    eog_events = np.array([eog_events + first_samp,
+                           np.zeros(n_events, int),
+                           event_id * np.ones(n_events, int)]).T
 
     return eog_events
 
@@ -132,8 +139,8 @@ def _get_eog_channel_index(ch_name, inst):
 @verbose
 def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
                       tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
-                      reject=None, flat=None,
-                      baseline=None, verbose=None):
+                      reject=None, flat=None, baseline=None,
+                      preload=True, verbose=None):
     """Conveniently generate epochs around EOG artifact events
 
     Parameters
@@ -141,8 +148,8 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
     raw : instance of Raw
         The raw data
     ch_name : str
-        The name of the channel to use for ECG peak detection.
-        The argument is mandatory if the dataset contains no ECG channels.
+        The name of the channel to use for EOG peak detection.
+        The argument is mandatory if the dataset contains no EOG channels.
     event_id : int
         The index to assign to found events
     picks : array-like of int | None (default)
@@ -157,17 +164,21 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
     h_freq : float
         High pass frequency.
     reject : dict | None
-        Rejection parameters based on peak to peak amplitude.
+        Rejection parameters based on peak-to-peak amplitude.
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-        If reject is None then no rejection is done. You should
-        use such parameters to reject big measurement artifacts
-        and not ECG for example
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # V (EEG channels)
+                          eog=250e-6 # V (EOG channels)
+                          )
+
     flat : dict | None
-        Rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
         If flat is None then no rejection is done.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
     baseline : tuple or list of length 2, or None
         The time interval to apply rescaling / baseline correction.
         If None do not apply it. If baseline is (a, b)
@@ -176,11 +187,15 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
         and if b is None then b is set to the end of the interval.
         If baseline is equal to (None, None), the entire time
         interval is used. If None, no correction is applied.
+    preload : bool
+        Preload epochs or not.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
-    ecg_epochs : instance of Epochs
-        Data epoched around ECG r-peaks.
+    eog_epochs : instance of Epochs
+        Data epoched around EOG events.
     """
     events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
                              l_freq=l_freq, h_freq=h_freq)
@@ -189,5 +204,5 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
     eog_epochs = Epochs(raw, events=events, event_id=event_id,
                         tmin=tmin, tmax=tmax, proj=False, reject=reject,
                         flat=flat, picks=picks, baseline=baseline,
-                        preload=True)
+                        preload=preload)
     return eog_epochs
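
A typical call mirroring the defaults documented above; the file name is
hypothetical and the reject values are the ones from the docstring example:

    import mne
    from mne.preprocessing import create_eog_epochs

    raw = mne.io.Raw('sample_raw.fif', preload=True)  # hypothetical file
    reject = dict(grad=4000e-13, mag=4e-12, eeg=40e-6)
    eog_epochs = create_eog_epochs(raw, reject=reject, baseline=(None, 0))
    eog_evoked = eog_epochs.average()
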
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py
index 180ac50..0d7f42b 100644
--- a/mne/preprocessing/ica.py
+++ b/mne/preprocessing/ica.py
@@ -4,18 +4,14 @@
 #
 # License: BSD (3-clause)
 
-import warnings
-
-from copy import deepcopy
 from inspect import getargspec, isfunction
 from collections import namedtuple
+from copy import deepcopy
 
 import os
 import json
 
 import numpy as np
-from scipy import stats
-from scipy.spatial import distance
 from scipy import linalg
 
 from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
@@ -38,50 +34,56 @@ from ..io.base import _BaseRaw
 from ..epochs import _BaseEpochs
 from ..viz import (plot_ica_components, plot_ica_scores,
                    plot_ica_sources, plot_ica_overlay)
-from ..channels import _contains_ch_type, ContainsMixin
+from ..viz.utils import (_prepare_trellis, tight_layout,
+                         _setup_vmin_vmax)
+from ..viz.topomap import (_prepare_topo_plot, _check_outlines,
+                           plot_topomap)
+
+from ..channels.channels import _contains_ch_type, ContainsMixin
 from ..io.write import start_file, end_file, write_id
-from ..utils import (check_sklearn_version, logger, check_fname, verbose,
-                     deprecated, _reject_data_segments)
+from ..utils import (check_version, logger, check_fname, verbose,
+                     _reject_data_segments, check_random_state,
+                     _get_fast_dot, compute_corr)
 from ..filter import band_pass_filter
 from .bads import find_outliers
 from .ctps_ import ctps
 from ..externals.six import string_types, text_type
 
-try:
-    from sklearn.utils.extmath import fast_dot
-except ImportError:
-    fast_dot = np.dot
-
 
 def _make_xy_sfunc(func, ndim_output=False):
     """Aux function"""
     if ndim_output:
-        sfunc = lambda x, y: np.array([func(a, y.ravel()) for a in x])[:, 0]
+        def sfunc(x, y):
+            return np.array([func(a, y.ravel()) for a in x])[:, 0]
     else:
-        sfunc = lambda x, y: np.array([func(a, y.ravel()) for a in x])
+        def sfunc(x, y):
+            return np.array([func(a, y.ravel()) for a in x])
     sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
     sfunc.__doc__ = func.__doc__
     return sfunc
 
-# makes score funcs attr accessible for users
-score_funcs = Bunch()
-
-xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items() if isfunction(f)
-                     and not n.startswith('_')]
 
-xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items() if isfunction(f)
-                      and not n.startswith('_')]
-
-score_funcs.update(dict((n, _make_xy_sfunc(f)) for n, f in xy_arg_dist_funcs
-                   if getargspec(f).args == ['u', 'v']))
-
-score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
-                   for n, f in xy_arg_stats_funcs
-                   if getargspec(f).args == ['x', 'y']))
-
-
-__all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events', 'score_funcs',
-           'read_ica', 'run_ica']
+# makes score funcs attr accessible for users
+def get_score_funcs():
+    """Helper to get the score functions"""
+    from scipy import stats
+    from scipy.spatial import distance
+    score_funcs = Bunch()
+    xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
+                         if isfunction(f) and not n.startswith('_')]
+    xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
+                          if isfunction(f) and not n.startswith('_')]
+    score_funcs.update(dict((n, _make_xy_sfunc(f))
+                            for n, f in xy_arg_dist_funcs
+                            if getargspec(f).args == ['u', 'v']))
+    score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
+                            for n, f in xy_arg_stats_funcs
+                            if getargspec(f).args == ['x', 'y']))
+    return score_funcs
+
+
+__all__ = ['ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
+           'get_score_funcs', 'read_ica', 'run_ica']
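
With the module-level table replaced by a lazy helper, user code now fetches
the Bunch explicitly; for example, scoring hypothetical sources against a
target with Pearson correlation:

    import numpy as np
    from mne.preprocessing import get_score_funcs

    sources = np.random.randn(5, 1000)  # e.g. 5 ICA source time courses
    target = np.random.randn(1000)      # e.g. an EOG channel
    scores = get_score_funcs()['pearsonr'](sources, target)  # shape (5,)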
 
 
 class ICA(ContainsMixin):
@@ -98,7 +100,7 @@ class ICA(ContainsMixin):
     temporarily removed before fitting the ICA. You can say::
 
         >> projs, raw.info['projs'] = raw.info['projs'], []
-        >> ica.decompose_raw(raw)
+        >> ica.fit(raw)
         >> raw.info['projs'] = projs
 
     Parameters
@@ -116,8 +118,8 @@ class ICA(ContainsMixin):
         The number of PCA components used after ICA recomposition. The ensuing
         attribute allows one to balance noise reduction against potential
         loss of features due to dimensionality reduction. If greater than
-        `self.n_components_`, the next `n_pca_components` minus
-        `n_components_` PCA components will be added before restoring the
+        ``self.n_components_``, the next ``n_pca_components`` minus
+        ``n_components_`` PCA components will be added before restoring the
         sensor space data. The attribute gets updated each time the
         corresponding parameter in .pick_sources_raw or .pick_sources_epochs
         is changed.
         If float, the number of components selected matches the number of
@@ -129,25 +131,9 @@ class ICA(ContainsMixin):
     random_state : None | int | instance of np.random.RandomState
         np.random.RandomState to initialize the FastICA estimation.
         As the estimation is non-deterministic it can be useful to
-        fix the seed to have reproducible results.
+        fix the seed to have reproducible results. Defaults to None.
     method : {'fastica', 'infomax', 'extended-infomax'}
         The ICA method to use. Defaults to 'fastica'.
-    algorithm : {'parallel', 'deflation'}
-        Apply parallel or deflational algorithm for FastICA. This parameter
-        belongs to FastICA and is deprecated. Please use `fit_params` instead.
-    fun : string or function, optional. Default: 'logcosh'
-        The functional form of the G function used in the
-        approximation to neg-entropy. Could be either 'logcosh', 'exp',
-        or 'cube'.
-        You can also provide your own function. It should return a tuple
-        containing the value of the function, and of its derivative, in the
-        point. This parameter belongs to FastICA and is deprecated.
-        Please use `fit_params` instead.
-    fun_args: dictionary, optional
-        Arguments to send to the functional form.
-        If empty and if fun='logcosh', fun_args will take value
-        {'alpha' : 1.0}. This parameter belongs to FastICA and is deprecated.
-        Please use `fit_params` instead.
     fit_params : dict | None
         Additional parameters passed to the ICA estimator chosen by `method`.
     max_iter : int, optional
@@ -163,7 +149,7 @@ class ICA(ContainsMixin):
     ch_names : list-like
         Channel names resulting from initial picking.
-    `n_components_` : int
+    ``n_components_`` : int
         If fit, the actual number of components used for ICA decomposition.
     n_pca_components : int
         See above.
@@ -171,15 +157,15 @@ class ICA(ContainsMixin):
         The number of components used for PCA dimensionality reduction.
     verbose : bool, str, int, or None
         See above.
-    `pca_components_` : ndarray
+    ``pca_components_`` : ndarray
         If fit, the PCA components
-    `pca_mean_` : ndarray
+    ``pca_mean_`` : ndarray
         If fit, the mean vector used to center the data before doing the PCA.
-    `pca_explained_variance_` : ndarray
+    ``pca_explained_variance_`` : ndarray
         If fit, the variance explained by each PCA component
-    `mixing_matrix_` : ndarray
+    ``mixing_matrix_`` : ndarray
         If fit, the mixing matrix to restore observed data, else None.
-    `unmixing_matrix_` : ndarray
+    ``unmixing_matrix_`` : ndarray
         If fit, the matrix to unmix observed data, else None.
     exclude : list
         List of sources indices to exclude, i.e. artifact components identified
@@ -193,18 +179,21 @@ class ICA(ContainsMixin):
         The measurement info copied from the object fitted.
     ``n_samples_`` : int
         The number of samples used on fit.
+    ``labels_`` : dict
+        A dictionary of independent component indices, grouped by types of
+        independent components. This attribute is set by some of the artifact
+        detection functions.
     """
     @verbose
     def __init__(self, n_components=None, max_pca_components=None,
                  n_pca_components=None, noise_cov=None, random_state=None,
-                 method='fastica',
-                 algorithm=None, fun=None, fun_args=None,
-                 fit_params=None, max_iter=200, verbose=None):
+                 method='fastica', fit_params=None, max_iter=200,
+                 verbose=None):
         methods = ('fastica', 'infomax', 'extended-infomax')
         if method not in methods:
             raise ValueError('`method` must be "%s". You passed: "%s"' %
                              ('" or "'.join(methods), method))
-        if not check_sklearn_version(min_version='0.12'):
+        if not check_version('sklearn', '0.12'):
             raise RuntimeError('the scikit-learn package (version >= 0.12) '
                                'is required for ICA')
 
@@ -226,18 +215,7 @@ class ICA(ContainsMixin):
         self.max_pca_components = max_pca_components
         self.n_pca_components = n_pca_components
         self.ch_names = None
-        self.random_state = random_state if random_state is not None else 42
-
-        for attr in ['algorithm', 'fun', 'fun_args']:
-            if eval(attr) is not None:
-                warnings.warn('The parameter `%s` is deprecated and will be'
-                              'removed in MNE 0.9. Please use '
-                              '`fit_params` instead' % attr,
-                              DeprecationWarning)
-
-        self.algorithm = algorithm
-        self.fun = fun
-        self.fun_args = fun_args
+        self.random_state = random_state
 
         if fit_params is None:
             fit_params = {}
@@ -308,14 +286,21 @@ class ICA(ContainsMixin):
             Increment for selecting each nth time slice. If None, all samples
             within ``start`` and ``stop`` are used.
         reject : dict | None
-            Rejection parameters based on peak to peak amplitude.
+            Rejection parameters based on peak-to-peak amplitude.
             Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-            If reject is None then no rejection is done. You should
-            use such parameters to reject big measurement artifacts
-            and not EOG for example. It only applies if `inst` is of type Raw.
+            If reject is None then no rejection is done. Example::
+
+                reject = dict(grad=4000e-13, # T / m (gradiometers)
+                              mag=4e-12, # T (magnetometers)
+                              eeg=40e-6, # V (EEG channels)
+                              eog=250e-6 # V (EOG channels)
+                              )
+
+            It only applies if `inst` is of type Raw.
         flat : dict | None
-            Rejection parameters based on flatness of signal
-            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+            Rejection parameters based on flatness of signal.
+            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+            are floats that set the minimum acceptable peak-to-peak amplitude.
             If flat is None then no rejection is done.
             It only applies if `inst` is of type Raw.
         tstep : float
@@ -346,6 +331,9 @@ class ICA(ContainsMixin):
         del self.mixing_matrix_
         del self.n_components_
         del self.n_samples_
+        del self.pca_components_
+        del self.pca_explained_variance_
+        del self.pca_mean_
         if hasattr(self, 'drop_inds_'):
             del self.drop_inds_
 
@@ -429,11 +417,12 @@ class ICA(ContainsMixin):
 
     def _pre_whiten(self, data, info, picks):
         """Aux function"""
+        fast_dot = _get_fast_dot()
         has_pre_whitener = hasattr(self, '_pre_whitener')
         if not has_pre_whitener and self.noise_cov is None:
             # use standardization as whitener
             # Scale (z-score) the data by channel type
-            info = pick_info(deepcopy(info), picks)
+            info = pick_info(info, picks)
             pre_whitener = np.empty([len(data), 1])
             for ch_type in ['mag', 'grad', 'eeg']:
                 if _contains_ch_type(info, ch_type):
@@ -460,9 +449,11 @@ class ICA(ContainsMixin):
         """Aux function """
         from sklearn.decomposition import RandomizedPCA
 
+        random_state = check_random_state(self.random_state)
+
         # XXX fix copy==True later. Bug in sklearn, see PR #2273
         pca = RandomizedPCA(n_components=max_pca_components, whiten=True,
-                            copy=True, random_state=self.random_state)
+                            copy=True, random_state=random_state)
 
         if isinstance(self.n_components, float):
             # compute full feature variance before doing PCA
@@ -474,8 +465,8 @@ class ICA(ContainsMixin):
             # compute explained variance manually, cf. sklearn bug
             # fixed in #2664
             explained_variance_ratio_ = pca.explained_variance_ / full_var
-            n_components_ = np.sum(explained_variance_ratio_.cumsum()
-                                   <= self.n_components)
+            n_components_ = np.sum(explained_variance_ratio_.cumsum() <=
+                                   self.n_components)
             if n_components_ < 1:
                 raise RuntimeError('One PCA component captures most of the '
                                    'explained variance, your threshold resu'
@@ -485,12 +476,13 @@ class ICA(ContainsMixin):
                         n_components_)
             sel = slice(n_components_)
         else:
-            logger.info('Selection by number: %i components' %
-                        self.n_components)
             if self.n_components is not None:  # normal n case
                 sel = slice(self.n_components)
+                logger.info('Selection by number: %i components' %
+                            self.n_components)
             else:  # None case
-                logger.info('Using all PCA components: %i' % pca.components_)
+                logger.info('Using all PCA components: %i'
+                            % len(pca.components_))
                 sel = slice(len(pca.components_))
 
         # the things to store for PCA
@@ -510,19 +502,22 @@ class ICA(ContainsMixin):
         if self.method == 'fastica':
             from sklearn.decomposition import FastICA  # to avoid strong dep.
             ica = FastICA(whiten=False,
-                          random_state=self.random_state, **self.fit_params)
+                          random_state=random_state, **self.fit_params)
             ica.fit(data[:, sel])
             # get unmixing and add scaling
             self.unmixing_matrix_ = getattr(ica, 'components_',
                                             'unmixing_matrix_')
         elif self.method in ('infomax', 'extended-infomax'):
-            self.unmixing_matrix_ = infomax(data[:, sel], **self.fit_params)
+            self.unmixing_matrix_ = infomax(data[:, sel],
+                                            random_state=random_state,
+                                            **self.fit_params)
         self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :]
         self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
         self.current_fit = fit_type
 
     def _transform(self, data):
         """Compute sources from data (operates inplace)"""
+        fast_dot = _get_fast_dot()
         if self.pca_mean_ is not None:
             data -= self.pca_mean_[:, None]
 
@@ -537,11 +532,12 @@ class ICA(ContainsMixin):
             raise RuntimeError('No fit available. Please fit ICA.')
         start, stop = _check_start_stop(raw, start, stop)
 
-        picks = [raw.ch_names.index(k) for k in self.ch_names]
+        picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
         if len(picks) != len(self.ch_names):
-            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+            raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
                                'fitted but %i channels supplied. \nPlease '
-                               'provide Epochs compatible with '
+                               'provide Raw compatible with '
                                'ica.ch_names' % (len(self.ch_names),
                                                  len(picks)))
 
@@ -554,9 +550,8 @@ class ICA(ContainsMixin):
         if not hasattr(self, 'mixing_matrix_'):
             raise RuntimeError('No fit available. Please fit ICA')
 
-        picks = pick_types(epochs.info, include=self.ch_names, exclude=[],
-                           ref_meg=False)
-
+        picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
         # special case where epochs come picked but fit was 'unpicked'.
         if len(picks) != len(self.ch_names):
             raise RuntimeError('Epochs don\'t match fitted data: %i channels '
@@ -581,13 +576,13 @@ class ICA(ContainsMixin):
         if not hasattr(self, 'mixing_matrix_'):
             raise RuntimeError('No fit available. Please first fit ICA')
 
-        picks = pick_types(evoked.info, include=self.ch_names, exclude=[],
-                           ref_meg=False)
+        picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
+                           meg=False, ref_meg=False)
 
         if len(picks) != len(self.ch_names):
-            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
-                               'fitted but %i channels supplied. \nPlease '
-                               'provide Epochs compatible with '
+            raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide Evoked compatible with '
                                'ica.ch_names' % (len(self.ch_names),
                                                  len(picks)))
 
@@ -643,12 +638,12 @@ class ICA(ContainsMixin):
         # merge copied instance and picked data with sources
         sources = self._transform_raw(raw, start=start, stop=stop)
         if raw.preload:  # get data and temporarily delete
-            data, times = raw._data, raw._times
-            del raw._data, raw._times
+            data = raw._data
+            del raw._data
 
         out = raw.copy()  # copy and reappend
         if raw.preload:
-            raw._data, raw._times = data, times
+            raw._data = data
 
         # populate copied raw.
         start, stop = _check_start_stop(raw, start, stop)
@@ -665,11 +660,14 @@ class ICA(ContainsMixin):
         out.preload = True
 
         # update first and last samples
-        out.first_samp = raw.first_samp + (start if start else 0)
-        out.last_samp = out.first_samp + stop if stop else raw.last_samp
+        out._first_samps = np.array([raw.first_samp +
+                                     (start if start else 0)])
+        out._last_samps = np.array([out.first_samp + stop
+                                    if stop else raw.last_samp])
 
         out._projector = None
         self._export_info(out.info, raw, add_channels)
+        out._update_times()
 
         return out
 
@@ -686,7 +684,7 @@ class ICA(ContainsMixin):
 
         self._export_info(out.info, epochs, add_channels)
         out.preload = True
-        out.raw = None
+        out._raw = None
         out._projector = None
 
         return out
@@ -724,9 +722,8 @@ class ICA(ContainsMixin):
                                 kind=FIFF.FIFFV_MISC_CH,
                                 coord_Frame=FIFF.FIFFV_COORD_UNKNOWN,
                                 loc=np.array([0., 0., 0., 1.] * 3, dtype='f4'),
-                                unit=FIFF.FIFF_UNIT_NONE, eeg_loc=None,
-                                range=1.0, scanno=ii + 1, unit_mul=0,
-                                coil_trans=None))
+                                unit=FIFF.FIFF_UNIT_NONE,
+                                range=1.0, scanno=ii + 1, unit_mul=0))
 
         if add_channels is not None:
             # re-append additionally picked ch_names
@@ -761,11 +758,11 @@ class ICA(ContainsMixin):
             Callable taking as arguments either two input arrays
             (e.g. Pearson correlation) or one input
             array (e. g. skewness) and returns a float. For convenience the
-            most common score_funcs are available via string labels: Currently,
-            all distance metrics from scipy.spatial and all functions from
-            scipy.stats taking compatible input arguments are supported. These
-            function have been modified to support iteration over the rows of a
-            2D array.
+            most common score_funcs are available via string labels:
+            Currently, all distance metrics from scipy.spatial and all
+            functions from scipy.stats taking compatible input arguments are
+            supported. These functions have been modified to support
+            iteration over the rows of a 2D array.
         start : int | float | None
             First sample to include. If float, data will be interpreted as
             time in seconds. If None, data will be used from the first sample.
@@ -804,8 +801,8 @@ class ICA(ContainsMixin):
             if verbose is None:
                 verbose = self.verbose
             if isinstance(inst, _BaseRaw):
-                sources, target = _band_pass_filter(self, sources, target, l_freq,
-                                                    h_freq, verbose)
+                sources, target = _band_pass_filter(self, sources, target,
+                                                    l_freq, h_freq, verbose)
 
         scores = _find_sources(sources, target, score_func)
 
@@ -886,10 +883,14 @@ class ICA(ContainsMixin):
         Returns
         -------
         ecg_idx : list of int
-            The indices of EOG related components.
-        scores : np.ndarray of float, shape (ica.n_components_)
+            The indices of ECG related components.
+        scores : np.ndarray of float, shape (``n_components_``)
             The correlation scores.
 
+        See Also
+        --------
+        find_bads_eog
+
         References
         ----------
         [1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
@@ -900,11 +901,10 @@ class ICA(ContainsMixin):
         """
         if verbose is None:
             verbose = self.verbose
-        try:
-            idx_ecg = _get_ecg_channel_index(ch_name, inst)
-        except RuntimeError:
-            idx_ecg = []
-        if not np.any(idx_ecg):
+
+        idx_ecg = _get_ecg_channel_index(ch_name, inst)
+
+        if idx_ecg is None:
             if verbose is None:
                 verbose = self.verbose
             ecg, times = _make_ecg(inst, start, stop, verbose)
@@ -914,7 +914,10 @@ class ICA(ContainsMixin):
 
         # some magic we need inevitably ...
         if inst.ch_names != self.ch_names:
-            inst = inst.pick_channels(self.ch_names, copy=True)
+            extra_picks = pick_types(inst.info, meg=False, ecg=True)
+            ch_names_to_pick = (self.ch_names +
+                                [inst.ch_names[k] for k in extra_picks])
+            inst = inst.pick_channels(ch_names_to_pick, copy=True)
 
         if method == 'ctps':
             if threshold is None:
@@ -939,10 +942,13 @@ class ICA(ContainsMixin):
                                         verbose=verbose)
             ecg_idx = find_outliers(scores, threshold=threshold)
         else:
-            raise ValueError('Mehtod "%s" not supported.' % method)
+            raise ValueError('Method "%s" not supported.' % method)
         # sort indices by scores
         ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
-        return list(ecg_idx), scores
+        if not hasattr(self, 'labels_'):
+            self.labels_ = dict()
+        self.labels_['ecg'] = list(ecg_idx)
+        return self.labels_['ecg'], scores
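A minimal usage sketch for the ``labels_`` bookkeeping introduced here, assuming a fitted ``ica`` and a Raw object ``raw`` that contains an ECG channel::

    ecg_inds, scores = ica.find_bads_ecg(raw, method='ctps')
    ica.exclude += ecg_inds
    # the same indices are now also available as ica.labels_['ecg']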
 
     @verbose
     def find_bads_eog(self, inst, ch_name=None, threshold=3.0,
@@ -951,7 +957,7 @@ class ICA(ContainsMixin):
         """Detect EOG related components using correlation
 
         Detection is based on Pearson correlation between the
-        filtered data and the filtered ECG channel.
+        filtered data and the filtered EOG channel.
         Thresholding is based on adaptive z-scoring. The above threshold
         components will be masked and the z-score will be recomputed
         until no supra-threshold component remains.
@@ -961,7 +967,7 @@ class ICA(ContainsMixin):
         inst : instance of Raw, Epochs or Evoked
             Object to compute sources from.
         ch_name : str
-            The name of the channel to use for ECG peak detection.
+            The name of the channel to use for EOG peak detection.
             The argument is mandatory if the dataset contains no EOG
             channels.
         threshold : int | float
@@ -982,10 +988,14 @@ class ICA(ContainsMixin):
 
         Returns
         -------
-        ecg_idx : list of int
+        eog_idx : list of int
             The indices of EOG related components, sorted by score.
-        scores : np.ndarray of float, shape (ica.n_components_) | list of array
+        scores : np.ndarray of float, shape (``n_components_``) | list of array
             The correlation scores.
+
+        See Also
+        --------
+        find_bads_ecg
         """
         if verbose is None:
             verbose = self.verbose
@@ -1027,7 +1037,10 @@ class ICA(ContainsMixin):
         if len(scores) == 1:
             scores = scores[0]
 
-        return eog_idx, scores
+        if not hasattr(self, 'labels_'):
+            self.labels_ = dict()
+        self.labels_['eog'] = list(eog_idx)
+        return self.labels_['eog'], scores
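The EOG variant follows the same pattern; a sketch assuming a fitted ``ica`` and a ``raw`` with an EOG channel::

    eog_inds, scores = ica.find_bads_eog(raw)
    ica.exclude += eog_inds   # also stored in ica.labels_['eog']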
 
     def apply(self, inst, include=None, exclude=None,
               n_pca_components=None, start=None, stop=None,
@@ -1100,7 +1113,7 @@ class ICA(ContainsMixin):
         start, stop = _check_start_stop(raw, start, stop)
 
         picks = pick_types(raw.info, meg=False, include=self.ch_names,
-                           exclude='bads')
+                           exclude='bads', ref_meg=False)
 
         data = raw[picks, start:stop][0]
         data, _ = self._pre_whiten(data, raw.info, picks)
@@ -1181,13 +1194,13 @@ class ICA(ContainsMixin):
 
     def _pick_sources(self, data, include, exclude):
         """Aux function"""
+        fast_dot = _get_fast_dot()
         if exclude is None:
             exclude = self.exclude
         else:
             exclude = list(set(self.exclude + list(exclude)))
 
-        _n_pca_comp = _check_n_pca_components(self, self.n_pca_components,
-                                              self.verbose)
+        _n_pca_comp = self._check_n_pca_components(self.n_pca_components)
 
         if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
             raise ValueError('n_pca_components must be >= '
@@ -1254,27 +1267,40 @@ class ICA(ContainsMixin):
 
         try:
             _write_ica(fid, self)
-        except Exception as inst:
+        except Exception:
             os.remove(fname)
-            raise inst
+            raise
         end_file(fid)
 
         return self
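A save/load roundtrip sketch (filename illustrative); the cleanup-and-bare-``raise`` above ensures a failed write does not leave a partial file behind::

    ica.save('sample-ica.fif')
    ica_restored = read_ica('sample-ica.fif')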
 
-    def plot_components(self, picks=None, ch_type='mag', res=64, layout=None,
-                        vmin=None, vmax=None, cmap='RdBu_r', sensors='k,',
+    def copy(self):
+        """Copy the ICA object
+
+        Returns
+        -------
+        ica : instance of ICA
+            The copied object.
+        """
+        return deepcopy(self)
+
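A short sketch of why ``copy`` is useful: alternative exclusion sets can be compared on independent copies without refitting::

    ica_a = ica.copy()
    ica_a.exclude = [0]       # try removing component 0 only
    ica_b = ica.copy()
    ica_b.exclude = [0, 3]    # or components 0 and 3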
+    def plot_components(self, picks=None, ch_type=None, res=64, layout=None,
+                        vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
                         colorbar=False, title=None, show=True, outlines='head',
-                        contours=6, image_interp='bilinear'):
-        """Project unmixing matrix on interpolated sensor topogrpahy.
+                        contours=6, image_interp='bilinear', head_pos=None):
+        """Project unmixing matrix on interpolated sensor topography.
 
         Parameters
         ----------
         picks : int | array-like | None
             The indices of the sources to be plotted.
             If None all are plotted in batches of 20.
-        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
             The channel type to plot. For 'grad', the gradiometers are
             collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
+        res : int
+            The resolution of the topomap image (n pixels along each side).
         layout : None | Layout
             Layout instance specifying sensor positions (does not need to
             be specified for Neuromag data). If possible, the correct layout is
@@ -1292,24 +1318,36 @@ class ICA(ContainsMixin):
             Colormap.
         sensors : bool | str
             Add markers for sensor locations to the plot. Accepts matplotlib
-            plot format string (e.g., 'r+' for red plusses).
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
         colorbar : bool
             Plot a colorbar.
-        res : int
-            The resolution of the topomap image (n pixels along each side).
+        title : str | None
+            Title to use.
         show : bool
             Call pyplot.show() at the end.
-        outlines : 'head' | dict | None
-            The outlines to be drawn. If 'head', a head scheme will be drawn.
-            If dict, each key refers to a tuple of x and y positions. The
-            values in 'mask_pos' will serve as image mask. If None,
-            nothing will be drawn. Defaults to 'head'.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
         contours : int | False | None
             The number of contour lines to draw. If 0, no contours will
             be drawn.
         image_interp : str
             The image interpolation to be used. All matplotlib options are
             accepted.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
 
         Returns
         -------
@@ -1323,10 +1361,11 @@ class ICA(ContainsMixin):
                                    sensors=sensors, colorbar=colorbar,
                                    title=title, show=show,
                                    outlines=outlines, contours=contours,
-                                   image_interp=image_interp)
+                                   image_interp=image_interp,
+                                   head_pos=head_pos)
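A hedged call sketch (picks and channel type illustrative)::

    # topographies of the first ten components for magnetometers
    ica.plot_components(picks=range(10), ch_type='mag', colorbar=True)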
 
     def plot_sources(self, inst, picks=None, exclude=None, start=None,
-                     stop=None, show=True, title=None):
+                     stop=None, title=None, show=True, block=False):
         """Plot estimated latent sources given the unmixing matrix.
 
         Typical use cases:
@@ -1343,32 +1382,50 @@ class ICA(ContainsMixin):
         picks : ndarray | None.
             The components to be displayed. If None, plot will show the
             sources in the order as fitted.
+        exclude : array_like of int
+            The components marked for exclusion. If None (default), ICA.exclude
+            will be used.
         start : int
             X-axis start index. If None from the beginning.
         stop : int
             X-axis stop index. If None to the end.
-        exclude : array_like of int
-            The components marked for exclusion. If None (default), ICA.exclude
-            will be used.
         title : str | None
             The figure title. If None a default is provided.
         show : bool
-            If True, plot will be shown, else just the figure is returned.
+            If True, all open plots will be shown.
+        block : bool
+            Whether to halt program execution until the figure is closed.
+            Useful for interactive selection of components in raw and epoch
+            plotter. For evoked, this parameter has no effect. Defaults to
+            False.
 
         Returns
         -------
         fig : instance of pyplot.Figure
             The figure.
+
+        Notes
+        -----
+        For raw and epoch instances, it is possible to select components for
+        exclusion by clicking on the line. The selected components are added to
+        ``ica.exclude`` on close. The independent components can be viewed as
+        topographies by clicking on the component name to the left of the
+        main axes. The topography view tries to infer the correct electrode
+        layout from the data. This should work at least for Neuromag data.
+
+        .. versionadded:: 0.10.0
         """
 
         return plot_ica_sources(self, inst=inst, picks=picks, exclude=exclude,
-                                title=title, start=start, stop=stop, show=show)
+                                title=title, start=start, stop=stop, show=show,
+                                block=block)
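A sketch of the interactive workflow described in the Notes, assuming a Raw object ``raw``::

    # halts until the browser window is closed; clicked components
    # are appended to ica.exclude
    ica.plot_sources(raw, block=True)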
 
     def plot_scores(self, scores, exclude=None, axhline=None,
-                    title='ICA component scores', figsize=(12, 6)):
+                    title='ICA component scores', figsize=(12, 6),
+                    show=True):
         """Plot scores related to detected components.
 
-        Use this function to asses how well your score describes outlier
+        Use this function to assess how well your score describes outlier
         sources and how well you were detecting them.
 
         Parameters
@@ -1383,7 +1440,9 @@ class ICA(ContainsMixin):
         title : str
             The figure title.
         figsize : tuple of int
-            The figure size. Defaults to (12, 6)
+            The figure size. Defaults to (12, 6).
+        show : bool
+            If True, all open plots will be shown.
 
         Returns
         -------
@@ -1391,13 +1450,14 @@ class ICA(ContainsMixin):
             The figure object.
         """
         return plot_ica_scores(ica=self, scores=scores, exclude=exclude,
-                               axhline=axhline, title=title, figsize=figsize)
+                               axhline=axhline, title=title,
+                               figsize=figsize, show=show)
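A sketch combining this with artifact scores (threshold line illustrative)::

    ecg_inds, scores = ica.find_bads_ecg(raw)
    ica.plot_scores(scores, exclude=ecg_inds, axhline=0.25)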
 
-    def plot_overlay(self, inst, exclude=None, start=None, stop=None,
-                     title=None):
+    def plot_overlay(self, inst, exclude=None, picks=None, start=None,
+                     stop=None, title=None, show=True):
         """Overlay of raw and cleaned signals given the unmixing matrix.
 
-        This method helps visualizing signal quality and arficat rejection.
+        This method helps visualizing signal quality and artifact rejection.
 
         Parameters
         ----------
@@ -1420,280 +1480,16 @@ class ICA(ContainsMixin):
             X-axis stop index. If None to the end.
         title : str
             The figure title.
+        show : bool
+            If True, all open plots will be shown.
 
         Returns
         -------
         fig : instance of pyplot.Figure
             The figure.
         """
-        return plot_ica_overlay(self, inst=inst, exclude=exclude, start=start,
-                                stop=stop, title=title)
-
-    @deprecated('`decompose_raw` is deprecated and will be removed in MNE 0.9.'
-                ' Use `fit` instead')
-    @verbose
-    def decompose_raw(self, raw, picks=None, start=None, stop=None,
-                      decim=None, reject=None, flat=None, tstep=2.0,
-                      verbose=None):
-        """This method is deprecated.
-        See ``ICA.fit``
-        """
-        return self.fit(raw, picks, start, stop, decim, reject, flat, tstep,
-                        verbose)
-
-    @deprecated('`decompose_epochs` is deprecated and will be removed in MNE'
-                ' 1.0. Use `fit` instead')
-    @verbose
-    def decompose_epochs(self, epochs, picks=None, decim=None, verbose=None):
-        """This method is deprecated.
-        See ``ICA.fit``
-        """
-        return self._fit_epochs(epochs, picks, decim, verbose)
-
-    @deprecated('`get_sources_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `get_sources` instead')
-    def get_sources_raw(self, raw, start=None, stop=None):
-        """This method is deprecated.
-        See ``ICA.fit``
-        """
-        return self._transform_raw(raw, start, stop)
-
-    @deprecated('`get_sources_epochs` is deprecated and will be removed in '
-                'MNE 0.9. Use `get_sources` instead')
-    def get_sources_epochs(self, epochs, concatenate=False):
-        """This method is deprecated.
-        See ``ICA.get_sources``
-        """
-        return self._transform_epochs(epochs, concatenate)
-
-    @deprecated('`sources_as_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `get_sources` instead')
-    def sources_as_raw(self, raw, picks=None, start=None, stop=None):
-        """This method is deprecated
-
-        see ``ICA.get_sources``.
-        """
-        if picks is None:
-            picks = pick_types(raw.info, meg=False, eeg=False, misc=True,
-                               ecg=True, eog=True, stim=True, exclude='bads')
-
-        add_channels = [raw.ch_names[k] for k in picks]
-        return self.get_sources(raw, add_channels, start, stop)
-
-    @deprecated('`sources_as_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `get_sources` instead')
-    def sources_as_epochs(self, epochs, picks=None):
-        """This method is deprecated
-
-        see ``ICA.get_sources``.
-        """
-        if picks is None:
-            picks = pick_types(epochs.info, meg=False, eeg=False, misc=True,
-                               ecg=True, eog=True, stim=True, exclude='bads')
-
-        add_channels = [epochs.ch_names[k] for k in picks]
-        return self.get_sources(epochs, add_channels, False)
-
-    @deprecated('`find_sources_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `find_bads` instead')
-    def find_sources_raw(self, raw, target=None, score_func='pearsonr',
-                         start=None, stop=None, l_freq=None, h_freq=None):
-        """Find sources based on own distribution or based on similarity to
-        other sources or between source and target.
-
-        Parameters
-        ----------
-        raw : instance of Raw
-            Raw object to draw sources from.
-        target : array-like | ch_name | None
-            Signal to which the sources shall be compared. It has to be of
-            the same shape as the sources. If some string is supplied, a
-            routine will try to find a matching channel. If None, a score
-            function expecting only one input-array argument must be used,
-            for instance, scipy.stats.skew (default).
-        score_func : callable | str label
-            Callable taking as arguments either two input arrays
-            (e.g. pearson correlation) or one input
-            array (e. g. skewness) and returns a float. For convenience the
-            most common score_funcs are available via string labels: Currently,
-            all distance metrics from scipy.spatial and all functions from
-            scipy.stats taking compatible input arguments are supported. These
-            function have been modified to support iteration over the rows of a
-            2D array.
-        start : int | float | None
-            First sample to include. If float, data will be interpreted as
-            time in seconds. If None, data will be used from the first sample.
-        stop : int | float | None
-            Last sample to not include. If float, data will be interpreted as
-            time in seconds. If None, data will be used to the last sample.
-        scores : ndarray
-            Scores for each source as returned from score_func.
-
-        Returns
-        -------
-        scores : ndarray
-            scores for each source as returned from score_func
-        """
-        return self.score_sources(inst=raw, target=target,
-                                  score_func=score_func,
-                                  start=start, stop=stop, l_freq=l_freq,
-                                  h_freq=h_freq)
-
-    @deprecated('`find_sources_epochs` is deprecated and will be removed in '
-                'MNE 0.9. Use `find_bads` instead')
-    def find_sources_epochs(self, epochs, target=None, score_func='pearsonr',
-                            l_freq=None, h_freq=None):
-        """Find sources based on relations between source and target
-
-        Parameters
-        ----------
-        epochs : instance of Epochs
-            Epochs object to draw sources from.
-        target : array-like | ch_name | None
-            Signal to which the sources shall be compared. It has to be of
-            the same shape as the sources. If some string is supplied, a
-            routine will try to find a matching channel. If None, a score
-            function expecting only one input-array argument must be used,
-            for instance, scipy.stats.skew (default).
-        score_func : callable | str label
-            Callable taking as arguments either two input arrays
-            (e.g. pearson correlation) or one input
-            array (e. g. skewness) and returns a float. For convenience the
-            most common score_funcs are available via string labels: Currently,
-            all distance metrics from scipy.spatial and all functions from
-            scipy.stats taking compatible input arguments are supported. These
-            function have been modified to support iteration over the rows of a
-            2D array.
-
-        Returns
-        -------
-        scores : ndarray
-            scores for each source as returned from score_func
-        """
-        return self.score_sources(inst=epochs, target=target,
-                                  score_func=score_func, l_freq=l_freq,
-                                  h_freq=h_freq)
-
-    @deprecated('`pick_sources_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `apply` instead')
-    def pick_sources_raw(self, raw, include=None, exclude=None,
-                         n_pca_components=None, start=None, stop=None,
-                         copy=True):
-        """Recompose raw data including or excluding some sources
-
-        Parameters
-        ----------
-        raw : instance of Raw
-            Raw object to pick to remove ICA components from.
-        include : list-like | None
-            The source indices to use. If None all are used.
-        exclude : list-like | None
-            The source indices to remove. If None all are used.
-        n_pca_components : int | float
-            The number of PCA components to be unwhitened, where
-            `n_components_` is the lower bound and max_pca_components
-            the upper bound. If greater than `self.n_components_`, the next
-            `n_pca_components` minus 'n_components' PCA components will
-            be added before restoring the sensor space data. This can be used
-            to take back the PCA dimension reduction. If float, the number of
-            components selected matches the number of components with a
-            cumulative explained variance below `n_pca_components`.
-        start : int | float | None
-            First sample to include. If float, data will be interpreted as
-            time in seconds. If None, data will be used from the first sample.
-        stop : int | float | None
-            Last sample to not include. If float, data will be interpreted as
-            time in seconds. If None, data will be used to the last sample.
-        copy: bool
-            modify raw instance in place or return modified copy.
-
-        Returns
-        -------
-        raw : instance of Raw
-            raw instance with selected ICA components removed
-        """
-        return self.apply(inst=raw, include=include, exclude=exclude,
-                          n_pca_components=n_pca_components, start=stop,
-                          stop=stop, copy=copy)
-
-    @deprecated('`pick_sources_epochs` is deprecated and will be removed in '
-                'MNE 0.9. Use `apply` instead')
-    def pick_sources_epochs(self, epochs, include=None, exclude=None,
-                            n_pca_components=None, copy=True):
-        """Recompose epochs
-
-        Parameters
-        ----------
-        epochs : instance of Epochs
-            Epochs object to pick to remove ICA components from.
-            Data must be preloaded.
-        include : list-like | None
-            The source indices to use. If None all are used.
-        exclude : list-like | None
-            The source indices to remove. If None  all are used.
-        n_pca_components : int | float
-            The number of PCA components to be unwhitened, where
-            `n_components_` is the lower bound and max_pca_components
-            the upper bound. If greater than `self.n_components_`, the next
-            `n_pca_components` minus `n_components_` PCA components will
-            be added before restoring the sensor space data. This can be used
-            to take back the PCA dimension reduction. If float, the number of
-            components selected matches the number of components with a
-            cumulative explained variance below `n_pca_components`.
-        copy : bool
-            Modify Epochs instance in place or return modified copy.
-
-        Returns
-        -------
-        epochs : instance of Epochs
-            Epochs with selected ICA components removed.
-        """
-        return self.apply(inst=epochs, include=include,
-                          exclude=exclude, n_pca_components=n_pca_components,
-                          copy=copy)
-
-    @deprecated('`pick_topomap` is deprecated and will be removed in '
-                'MNE 0.9. Use `plot_components` instead')
-    def plot_topomap(self, source_idx, ch_type='mag', res=64, layout=None,
-                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
-                     show=True):
-        """This method is deprecatd
-
-        see ``ica.plot_components``.
-        """
-        return self.plot_components(picks=source_idx,
-                                    ch_type=ch_type,
-                                    res=res, layout=layout, vmax=vmax,
-                                    cmap=cmap,
-                                    sensors=sensors, colorbar=colorbar,
-                                    show=show)
-
-    @deprecated('`plot_sources_raw` is deprecated and will be removed in '
-                'MNE 0.9. Use `plot_sources` instead')
-    def plot_sources_raw(self, raw, order=None, start=None, stop=None,
-                         n_components=None, source_idx=None, ncol=3, nrow=None,
-                         title=None, show=True):
-        """This method is deprecated.
-
-        See ``ica.plot_sources``
-        """
-        fig = self.plot_sources(inst=raw, picks=source_idx, ncol=ncol,
-                                title=title, show=show)
-
-        return fig
-
-    @deprecated('`plot_sources_epochs` is deprecated and will be removed in '
-                'MNE 0.9. Use `plot_sources` instead')
-    def plot_sources_epochs(self, epochs, order=None, epoch_idx=None,
-                            start=None, stop=None, n_components=None,
-                            source_idx=None, ncol=3, nrow=None, title=None,
-                            show=True):
-        """This method is deprecated.
-
-        See ``ica.plot_sources``
-        """
-        return plot_ica_sources(self, inst=epochs[epoch_idx], picks=order,
-                                start=start, stop=stop, ncol=ncol)
+        return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
+                                start=start, stop=stop, title=title, show=show)
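A before/after sketch, assuming ``find_bads_ecg`` has populated ``ica.labels_['ecg']`` (sample range illustrative)::

    ica.plot_overlay(raw, exclude=ica.labels_['ecg'], start=0, stop=2000)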
 
     def detect_artifacts(self, raw, start_find=None, stop_find=None,
                          ecg_ch=None, ecg_score_func='pearsonr',
@@ -1724,6 +1520,8 @@ class ICA(ContainsMixin):
 
         Parameters
         ----------
+        raw : instance of Raw
+            Raw object to draw sources from.
         start_find : int | float | None
             First sample to include for artifact search. If float, data will be
             interpreted as time in seconds. If None, data will be used from the
@@ -1805,22 +1603,21 @@ class ICA(ContainsMixin):
 
         return self
 
+    @verbose
+    def _check_n_pca_components(self, _n_pca_comp, verbose=None):
+        """Aux function"""
+        if isinstance(_n_pca_comp, float):
+            _n_pca_comp = ((self.pca_explained_variance_ /
+                           self.pca_explained_variance_.sum()).cumsum() <=
+                           _n_pca_comp).sum()
+            logger.info('Selected %i PCA components by explained '
+                        'variance' % _n_pca_comp)
+        elif _n_pca_comp is None:
+            _n_pca_comp = self.max_pca_components
+        elif _n_pca_comp < self.n_components_:
+            _n_pca_comp = self.n_components_
 
- at verbose
-def _check_n_pca_components(ica, _n_pca_comp, verbose=None):
-    """Aux function"""
-    if isinstance(_n_pca_comp, float):
-        _n_pca_comp = ((ica.pca_explained_variance_ /
-                       ica.pca_explained_variance_.sum()).cumsum()
-                       <= _n_pca_comp).sum()
-        logger.info('Selected %i PCA components by explained '
-                    'variance' % _n_pca_comp)
-    elif _n_pca_comp is None:
-        _n_pca_comp = ica.max_pca_components
-    elif _n_pca_comp < ica.n_components_:
-        _n_pca_comp = ica.n_components_
-
-    return _n_pca_comp
+        return _n_pca_comp
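A worked example of the float branch, with made-up explained-variance values::

    import numpy as np
    var = np.array([0.5, 0.3, 0.1, 0.06, 0.04])      # pca_explained_variance_
    # cumulative ratios: [0.5, 0.8, 0.9, 0.96, 1.0]
    n = ((var / var.sum()).cumsum() <= 0.95).sum()   # -> 3 components kept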
 
 
 def _check_start_stop(raw, start, stop):
@@ -1837,12 +1634,12 @@ def ica_find_ecg_events(raw, ecg_source, event_id=999,
 
     Parameters
     ----------
+    raw : instance of Raw
+        Raw object to draw sources from.
     ecg_source : ndarray
         ICA source resembling ECG to find peaks from.
     event_id : int
         The index to assign to found events.
-    raw : instance of Raw
-        Raw object to draw sources from.
     tstart : float
         Start detection after tstart seconds. Useful when beginning
         of run is noisy.
@@ -1894,10 +1691,10 @@ def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
         ICA source resembling EOG to find peaks from.
     event_id : int
         The index to assign to found events.
-    low_pass : float
-        Low pass frequency.
-    high_pass : float
-        High pass frequency.
+    l_freq : float
+        Low cut-off frequency in Hz.
+    h_freq : float
+        High cut-off frequency in Hz.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1924,14 +1721,14 @@ def _get_target_ch(container, target):
 
     if len(picks) == 0:
         raise ValueError('%s not in channel list (%s)' %
-                        (target, container.ch_names))
+                         (target, container.ch_names))
     return picks
 
 
 def _find_sources(sources, target, score_func):
     """Aux function"""
     if isinstance(score_func, string_types):
-        score_func = score_funcs.get(score_func, score_func)
+        score_func = get_score_funcs().get(score_func, score_func)
 
     if not callable(score_func):
         raise ValueError('%s is not a valid score_func.' % score_func)
@@ -1984,10 +1781,7 @@ def _write_ica(fid, ica):
                     n_components=ica.n_components,
                     n_pca_components=ica.n_pca_components,
                     max_pca_components=ica.max_pca_components,
-                    current_fit=ica.current_fit,
-                    algorithm=ica.algorithm,
-                    fun=ica.fun,
-                    fun_args=ica.fun_args)
+                    current_fit=ica.current_fit)
 
     if ica.info is not None:
         start_block(fid, FIFF.FIFFB_MEAS)
@@ -2066,12 +1860,13 @@ def read_ica(fname):
 
     try:
         info, meas = read_meas_info(fid, tree)
-        info['filename'] = fname
     except ValueError:
         logger.info('Could not find the measurement info. \n'
                     'Functionality requiring the info won\'t be'
                     ' available.')
         info = None
+    else:
+        info['filename'] = fname
 
     ica_data = dir_tree_find(tree, FIFF.FIFFB_ICA)
     if len(ica_data) == 0:
@@ -2118,8 +1913,13 @@ def read_ica(fname):
         logger.info('Reading whitener drawn from noise covariance ...')
 
     logger.info('Now restoring ICA solution ...')
+
     # make sure dtypes are np.float64 to satisfy fast_dot
-    f = lambda x: x.astype(np.float64)
+    def f(x):
+        return x.astype(np.float64)
+
+    ica_init = dict((k, v) for k, v in ica_init.items()
+                    if k in getargspec(ICA.__init__).args)
     ica = ICA(**ica_init)
     ica.current_fit = current_fit
     ica.ch_names = ch_names.split(':')
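The ``getargspec`` filter above drops stored keyword arguments that the current ``ICA.__init__`` no longer accepts (such as the removed ``algorithm``, ``fun`` and ``fun_args``). A standalone sketch of the pattern, with hypothetical names::

    from inspect import getargspec

    def example_init(self, n_components=None, noise_cov=None):
        pass

    stored = {'n_components': 0.95, 'algorithm': 'parallel'}  # from old file
    kwargs = dict((k, v) for k, v in stored.items()
                  if k in getargspec(example_init).args)
    # kwargs == {'n_components': 0.95}; 'algorithm' is silently dropped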
@@ -2148,6 +1948,7 @@ def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
                       skew_criterion, kurt_criterion, var_criterion,
                       add_nodes):
     """Aux Function"""
+    from scipy import stats
 
     nodes = []
     if ecg_ch is not None:
@@ -2196,30 +1997,30 @@ def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
 @verbose
 def run_ica(raw, n_components, max_pca_components=100,
             n_pca_components=64, noise_cov=None, random_state=None,
-            algorithm='parallel', fun='logcosh', fun_args=None,
-            verbose=None, picks=None, start=None, stop=None, start_find=None,
+            picks=None, start=None, stop=None, start_find=None,
             stop_find=None, ecg_ch=None, ecg_score_func='pearsonr',
             ecg_criterion=0.1, eog_ch=None, eog_score_func='pearsonr',
             eog_criterion=0.1, skew_criterion=-1, kurt_criterion=-1,
-            var_criterion=0, add_nodes=None):
+            var_criterion=0, add_nodes=None, verbose=None):
     """Run ICA decomposition on raw data and identify artifact sources
 
     This function implements an automated artifact removal work flow.
 
     Hints and caveats:
-    - It is highly recommended to bandpass filter ECG and EOG
-    data and pass them instead of the channel names as ecg_ch and eog_ch
-    arguments.
-    - Please check your results. Detection by kurtosis and variance
-    can be powerful but misclassification of brain signals as
-    noise cannot be precluded. If you are not sure set those to None.
-    - Consider using shorter times for start_find and stop_find than
-    for start and stop. It can save you much time.
 
-    Example invocation (taking advantage of defaults):
+        - It is highly recommended to bandpass filter ECG and EOG
+          data and pass them instead of the channel names as ecg_ch and eog_ch
+          arguments.
+        - Please check your results. Detection by kurtosis and variance
+          can be powerful but misclassification of brain signals as
+          noise cannot be precluded. If you are not sure, set those to None.
+        - Consider using shorter times for start_find and stop_find than
+          for start and stop. It can save you much time.
 
-    ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
-                  ecg_ch='MEG 1531', eog_ch='EOG 061')
+    Example invocation (taking advantage of defaults)::
+
+        ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
+                      ecg_ch='MEG 1531', eog_ch='EOG 061')
 
     Parameters
     ----------
@@ -2230,18 +2031,18 @@ def run_ica(raw, n_components, max_pca_components=100,
         smaller than max_pca_components. If None, all PCA components will be
         used. If float between 0 and 1, components will be selected by the
         cumulative percentage of explained variance.
+    max_pca_components : int | None
+        The number of components used for PCA decomposition. If None, no
+        dimension reduction will be applied and max_pca_components will equal
+        the number of channels supplied on decomposing data.
     n_pca_components : int | float
         The number of PCA components used after ICA recomposition. The ensuing
         attribute allows to balance noise reduction against potential loss of
         features due to dimensionality reduction. If greater than
-        self.n_components_, the next 'n_pca_components' minus
-        'n_components_' PCA components will be added before restoring the
+        ``self.n_components_``, the next ``n_pca_components`` minus
+        ``n_components_`` PCA components will be added before restoring the
         sensor space data. The attribute gets updated each time the
         corresponding parameter in .pick_sources_raw or .pick_sources_epochs
         is changed.
-    max_pca_components : int | None
-        The number of components used for PCA decomposition. If None, no
-        dimension reduction will be applied and max_pca_components will equal
-        the number of channels supplied on decomposing data.
     noise_cov : None | instance of mne.cov.Covariance
         Noise covariance used for whitening. If None, channels are just
         z-scored.
@@ -2249,21 +2050,6 @@ def run_ica(raw, n_components, max_pca_components=100,
         np.random.RandomState to initialize the FastICA estimation.
         As the estimation is non-deterministic it can be useful to
         fix the seed to have reproducible results.
-    algorithm : {'parallel', 'deflation'}
-        Apply parallel or deflational algorithm for FastICA
-    fun : string or function, optional. Default: 'logcosh'
-        The functional form of the G function used in the
-        approximation to neg-entropy. Could be either 'logcosh', 'exp',
-        or 'cube'.
-        You can also provide your own function. It should return a tuple
-        containing the value of the function, and of its derivative, in the
-        point.
-    fun_args: dictionary, optional
-        Arguments to send to the functional form.
-        If empty and if fun='logcosh', fun_args will take value
-        {'alpha' : 1.0}
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
     picks : array-like of int
         Channels to be included. This selection remains throughout the
         initialized ICA solution. If None only good data channels are used.
@@ -2284,11 +2070,11 @@ def run_ica(raw, n_components, max_pca_components=100,
         interpreted as time in seconds. If None, data will be used to the last
         sample.
     ecg_ch : str | ndarray | None
-        The `target` argument passed to ica.find_sources_raw. Either the
+        The ``target`` argument passed to ica.find_sources_raw. Either the
         name of the ECG channel or the ECG time series. If None, this step
         will be skipped.
     ecg_score_func : str | callable
-        The `score_func` argument passed to ica.find_sources_raw. Either
+        The ``score_func`` argument passed to ica.find_sources_raw. Either
         the name of a function supported by ICA or a custom function.
     ecg_criterion : float | int | list-like | slice
         The indices of the sorted skewness scores. If float, sources with
@@ -2297,12 +2083,12 @@ def run_ica(raw, n_components, max_pca_components=100,
         E.g. range(2) would return the two sources with the highest score.
         If None, this step will be skipped.
     eog_ch : list | str | ndarray | None
-        The `target` argument or the list of target arguments subsequently
+        The ``target`` argument or the list of target arguments subsequently
         passed to ica.find_sources_raw. Either the name of the vertical EOG
         channel or the corresponding EOG time series. If None, this step
         will be skipped.
     eog_score_func : str | callable
-        The `score_func` argument passed to ica.find_sources_raw. Either
+        The ``score_func`` argument passed to ica.find_sources_raw. Either
         the name of a function supported by ICA or a custom function.
     eog_criterion : float | int | list-like | slice
         The indices of the sorted skewness scores. If float, sources with
@@ -2333,8 +2119,12 @@ def run_ica(raw, n_components, max_pca_components=100,
         (name : str, target : str | array, score_func : callable,
         criterion : float | int | list-like | slice). This parameter is a
         generalization of the artifact specific parameters above and has
-        the same structure. Example:
-        add_nodes=('ECG phase lock', ECG 01', my_phase_lock_function, 0.5)
+        the same structure. Example::
+
+            add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)
+
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -2343,10 +2133,9 @@ def run_ica(raw, n_components, max_pca_components=100,
     """
     ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
               n_pca_components=n_pca_components, noise_cov=noise_cov,
-              random_state=random_state, algorithm=algorithm, fun=fun,
-              fun_args=fun_args, verbose=verbose)
+              random_state=random_state, verbose=verbose)
 
-    ica.decompose_raw(raw, start=start, stop=stop, picks=picks)
+    ica.fit(raw, start=start, stop=stop, picks=picks)
     logger.info('%s' % ica)
     logger.info('    Now searching for artifacts...')
 
@@ -2369,13 +2158,296 @@ def _band_pass_filter(ica, sources, target, l_freq, h_freq, verbose=None):
         logger.info('... filtering ICA sources')
         # use fft here; steeper is better.
         sources = band_pass_filter(sources, ica.info['sfreq'],
-                                   l_freq, h_freq,  method='fft',
+                                   l_freq, h_freq, method='fft',
                                    verbose=verbose)
         logger.info('... filtering target')
         target = band_pass_filter(target, ica.info['sfreq'],
-                                  l_freq, h_freq,  method='fft',
+                                  l_freq, h_freq, method='fft',
                                   verbose=verbose)
     elif l_freq is not None or h_freq is not None:
         raise ValueError('Must specify both pass bands')
 
     return sources, target
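This helper surfaces in the public API through ``score_sources``: passing both cut-offs band-pass filters sources and target before scoring. A sketch (channel name illustrative)::

    scores = ica.score_sources(raw, target='EOG 061',
                               score_func='pearsonr',
                               l_freq=1., h_freq=10.)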
+
+
+# #############################################################################
+# CORRMAP
+
+def _get_ica_map(ica, components=None):
+    """Get ICA topomap for components"""
+    fast_dot = _get_fast_dot()
+    if components is None:
+        components = list(range(ica.n_components_))
+    maps = fast_dot(ica.mixing_matrix_[:, components].T,
+                    ica.pca_components_[:ica.n_components_])
+    return maps
+
+
+def _find_max_corrs(all_maps, target, threshold):
+    """Compute correlations between template and target components"""
+    all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
+    abs_corrs = [np.abs(a) for a in all_corrs]
+    corr_polarities = [np.sign(a) for a in all_corrs]
+
+    if threshold <= 1:
+        max_corrs = [list(np.nonzero(s_corr > threshold)[0])
+                     for s_corr in abs_corrs]
+    else:
+        max_corrs = [list(find_outliers(s_corr, threshold=threshold))
+                     for s_corr in abs_corrs]
+
+    am = [l[i] for l, i_s in zip(abs_corrs, max_corrs)
+          for i in i_s]
+    median_corr_with_target = np.median(am) if len(am) > 0 else 0
+
+    polarities = [l[i] for l, i_s in zip(corr_polarities, max_corrs)
+                  for i in i_s]
+
+    maxmaps = [l[i] for l, i_s in zip(all_maps, max_corrs)
+               for i in i_s]
+
+    if len(maxmaps) == 0:
+        return [], 0, 0, []
+    newtarget = np.zeros(maxmaps[0].size)
+    std_of_maps = np.std(np.asarray(maxmaps))
+    mean_of_maps = np.mean(np.asarray(maxmaps))
+    for maxmap, polarity in zip(maxmaps, polarities):
+        newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
+
+    newtarget /= len(maxmaps)
+    newtarget *= std_of_maps
+
+    sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
+
+    return newtarget, median_corr_with_target, sim_i_o, max_corrs
+
+
+def _plot_corrmap(data, subjs, indices, ch_type, ica, label, show, outlines,
+                  layout, cmap, contours):
+    """Customized ica.plot_components for corrmap"""
+    import matplotlib.pyplot as plt
+
+    title = 'Detected components'
+    if label is not None:
+        title += ' of type ' + label
+
+    picks = list(range(len(data)))
+
+    p = 20
+    if len(picks) > p:  # plot components by sets of 20
+        n_components = len(picks)
+        figs = [_plot_corrmap(data[k:k + p], subjs[k:k + p],
+                indices[k:k + p], ch_type, ica, label, show,
+                outlines=outlines, layout=layout, cmap=cmap,
+                contours=contours)
+                for k in range(0, n_components, p)]
+        return figs
+    elif np.isscalar(picks):
+        picks = [picks]
+
+    data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(
+        ica, ch_type, layout)
+    pos, outlines = _check_outlines(pos, outlines)
+
+    data = np.atleast_2d(data)
+    data = data[:, data_picks]
+
+    # prepare data for iteration
+    fig, axes = _prepare_trellis(len(picks), max_col=5)
+    fig.suptitle(title)
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+    for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
+        ttl = 'Subj. {0}, IC {1}'.format(subject, idx)
+        ax.set_title(ttl, fontsize=12)
+        data_ = _merge_grad_data(data_) if merge_grads else data_
+        vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
+        plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
+                     res=64, axis=ax, cmap=cmap, outlines=outlines,
+                     image_mask=None, contours=contours, show=False,
+                     image_interp='bilinear')[0]
+        ax.set_yticks([])
+        ax.set_xticks([])
+        ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.8)
+    fig.canvas.draw()
+    if show is True:
+        plt.show()
+    return fig
+
+
+ at verbose
+def corrmap(icas, template, threshold="auto", label=None,
+            ch_type="eeg", plot=True, show=True, verbose=None, outlines='head',
+            layout=None, sensors=True, contours=6, cmap='RdBu_r'):
+    """Find similar Independent Components across subjects by map similarity.
+
+    Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
+    match to a supplied template. Typically, feed it a list of fitted ICAs and
+    a template IC, for example, the blink for the first subject, to identify
+    specific ICs across subjects.
+
+    The specific procedure consists of two iterations. In a first step, the
+    maps best correlating with the template are identified. In the second
+    step, the analysis is repeated with the mean of the maps identified in
+    the first stage.
+
+    Outputs a list of fitted ICAs with the indices of the marked ICs in a
+    specified field.
+
+    The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html
+
+    Parameters
+    ----------
+    icas : list of mne.preprocessing.ICA
+        A list of fitted ICA objects.
+    template : tuple
+        A tuple with two elements (int, int) representing the list indices of
+        the set from which the template should be chosen, and the template.
+        E.g., if template=(1, 0), the first IC of the 2nd ICA object is used.
+    threshold : "auto" | list of float | float
+        Correlation threshold for identifying ICs.
+        If "auto", search for the best map by trying all correlations between
+        0.6 and 0.95. In the original proposal, lower values are considered,
+        but this is not yet implemented.
+        If list of floats, search for the best map in the specified range of
+        correlation strengths. As correlation values, these must be between
+        0 and 1.
+        If float > 0, select ICs correlating better than this.
+        If float > 1, use find_outliers to identify ICs within subjects (not
+        in the original Corrmap).
+        Defaults to "auto".
+    label : None | str
+        If not None, categorised ICs are stored in a dictionary "labels_" under
+        the given name. Preexisting entries will be appended to
+        (excluding repeats), not overwritten. If None, a dry run is performed.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        The channel type to plot. Defaults to 'eeg'.
+    plot : bool
+        Should constructed template and selected maps be plotted? Defaults
+        to True.
+    show : bool
+        Show figures if True.
+    layout : None | Layout | list of Layout
+        Layout instance specifying sensor positions (does not need to be
+        specified for Neuromag data). Or a list of Layout if projections
+        are from different sensor types.
+    cmap : matplotlib colormap
+        Colormap.
+    sensors : bool | str
+        Add markers for sensor locations to the plot. Accepts matplotlib plot
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
+    outlines : 'head' | dict | None
+        The outlines to be drawn. If 'head', a head scheme will be drawn. If
+        dict, each key refers to a tuple of x and y positions. The values in
+        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Defaults to 'head'. If dict, the 'autoshrink' (bool) field will
+        trigger automated shrinking of the positions due to points outside the
+        outline. Moreover, a matplotlib patch object can be passed for
+        advanced masking options, either directly or as a function that returns
+        patches (required for multi-axis plots).
+    contours : int | False | None
+        The number of contour lines to draw. If 0, no contours will be drawn.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    template_fig : fig
+        Figure showing the mean template.
+    labelled_ics : fig
+        Figure showing the labelled ICs in all ICA decompositions.
+    """
+    if not isinstance(plot, bool):
+        raise ValueError("`plot` must be of type `bool`")
+
+    if threshold == 'auto':
+        threshold = np.arange(60, 95, dtype=np.float64) / 100.
+
+    all_maps = [_get_ica_map(ica) for ica in icas]
+
+    target = all_maps[template[0]][template[1]]
+
+    if plot is True:
+        ttl = 'Template from subj. {0}'.format(str(template[0]))
+        template_fig = icas[template[0]].plot_components(
+            picks=template[1], ch_type=ch_type, title=ttl, outlines=outlines,
+            cmap=cmap, contours=contours, layout=layout, show=show)
+        template_fig.subplots_adjust(top=0.8)
+        template_fig.canvas.draw()
+
+    # first run: use user-selected map
+    if isinstance(threshold, (int, float)):
+        if len(all_maps) == 0 or len(target) == 0:
+            logger.info('No component detected using find_outliers.'
+                        ' Consider using threshold="auto"')
+            return icas
+        nt, mt, s, mx = _find_max_corrs(all_maps, target, threshold)
+    elif len(threshold) > 1:
+        paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
+        # find iteration with highest avg correlation with target
+        nt, mt, s, mx = paths[np.argmax([path[2] for path in paths])]
+
+    # second run: use output from first run
+    if isinstance(threshold, (int, float)):
+        if len(all_maps) == 0 or len(nt) == 0:
+            if threshold > 1:
+                logger.info('No component detected using find_outliers. '
+                            'Consider using threshold="auto"')
+            return icas
+        nt, mt, s, mx = _find_max_corrs(all_maps, nt, threshold)
+    elif len(threshold) > 1:
+        paths = [_find_max_corrs(all_maps, nt, t) for t in threshold]
+        # find iteration with highest avg correlation with target
+        nt, mt, s, mx = paths[np.argmax([path[1] for path in paths])]
+
+    allmaps, indices, subjs, nones = [list() for _ in range(4)]
+    logger.info('Median correlation with constructed map: %0.3f' % mt)
+    if plot is True:
+        logger.info('Displaying selected ICs per subject.')
+
+    for ii, (ica, max_corr) in enumerate(zip(icas, mx)):
+        if (label is not None) and (not hasattr(ica, 'labels_')):
+            ica.labels_ = dict()
+        if len(max_corr) > 0:
+            if isinstance(max_corr[0], np.ndarray):
+                max_corr = max_corr[0]
+            if label is not None:
+                ica.labels_[label] = list(set(list(max_corr) +
+                                          ica.labels_.get(label, list())))
+            if plot is True:
+                allmaps.extend(_get_ica_map(ica, components=max_corr))
+                subjs.extend([ii] * len(max_corr))
+                indices.extend(max_corr)
+        else:
+            if (label is not None) and (label not in ica.labels_):
+                ica.labels_[label] = list()
+            nones.append(ii)
+
+    if len(nones) == 0:
+        logger.info('At least 1 IC detected for each subject.')
+    else:
+        logger.info('No maps selected for subject(s) ' +
+                    ', '.join([str(x) for x in nones]) +
+                    ', consider a more liberal threshold.')
+
+    if plot is True:
+        labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
+                                     label, outlines=outlines, cmap=cmap,
+                                     contours=contours, layout=layout,
+                                     show=show)
+        return template_fig, labelled_ics
+    else:
+        return None
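A minimal corrmap sketch, assuming ``icas`` is a list of fitted ICA objects and that the first IC of the first subject is a known blink template; with ``plot=True`` and successful detection two figures are returned::

    template_fig, labelled_ics = corrmap(icas, template=(0, 0),
                                         threshold='auto', label='blink',
                                         ch_type='eeg')
    # each ica in icas now carries ica.labels_['blink']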
diff --git a/mne/preprocessing/infomax_.py b/mne/preprocessing/infomax_.py
index ee06e9b..053efde 100644
--- a/mne/preprocessing/infomax_.py
+++ b/mne/preprocessing/infomax_.py
@@ -7,16 +7,16 @@
 import math
 
 import numpy as np
-from scipy.stats import kurtosis
 
-from ..utils import logger, verbose, check_random_state
+from ..utils import logger, verbose, check_random_state, random_permutation
 
 
 @verbose
 def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
             anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
             kurt_size=6000, ext_blocks=1, max_iter=200,
-            random_state=None, verbose=None):
+            random_state=None, blowup=1e4, blowup_fac=0.5, n_small_angle=20,
+            use_bias=True, verbose=None):
     """Run the (extended) Infomax ICA decomposition on raw data
 
     based on the publications of Bell & Sejnowski 1995 (Infomax)
@@ -56,10 +56,39 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
         The window size for kurtosis estimation. Only considered for extended
         Infomax.
     ext_blocks : int
-        The number of blocks after which to recompute Kurtosis.
         Only considered for extended Infomax.
+        If positive, it denotes the number of blocks after which to recompute
+        the Kurtosis, which is used to estimate the signs of the sources.
+        In this case the number of sub-gaussian sources is automatically
+        determined.
+        If negative, the number of sub-gaussian sources to be used is fixed
+        and equal to n_subgauss. In this case the Kurtosis is not estimated.
     max_iter : int
         The maximum number of iterations. Defaults to 200.
+    random_state : int | np.random.RandomState
+        If random_state is an int, use random_state as seed of the random
+        number generator.
+        If random_state is already a np.random.RandomState instance, use
+        random_state as random number generator.
+    blowup : float
+        The maximum difference allowed between two successive estimations of
+        the unmixing matrix. Defaults to 1e4.
+    blowup_fac : float
+        The factor by which the learning rate will be reduced if the
+        difference between two successive estimations of the
+        unmixing matrix exceeds ``blowup``:
+            l_rate *= blowup_fac
+        Defaults to 0.5.
+    n_small_angle : int | None
+        The maximum number of allowed steps in which the angle between two
+        successive estimations of the unmixing matrix is less than
+        ``anneal_deg``.
+        If None, this parameter is not taken into account to stop the
+        iterations.
+        Defaults to 20.
+    use_bias : bool
+        Whether the bias should be computed.
+        Defaults to True.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -68,15 +97,13 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
     unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
         The linear unmixing operator.
     """
+    from scipy.stats import kurtosis
     rng = check_random_state(random_state)
 
     # define some default parameter
     max_weight = 1e8
     restart_fac = 0.9
     min_l_rate = 1e-10
-    blowup = 1e4
-    blowup_fac = 0.5
-    n_small_angle = 20
     degconst = 180.0 / np.pi
 
     # for extended Infomax
@@ -84,8 +111,6 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
     signsbias = 0.02
     signcount_threshold = 25
     signcount_step = 2
-    if ext_blocks > 0:  # allow not to recompute kurtosis
-        n_subgauss = 1  # but initialize n_subgauss to 1 if you recompute
 
     # check data shape
     n_samples, n_features = data.shape
@@ -122,23 +147,25 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
     wts_blowup = False
     blockno = 0
     signcount = 0
+    initial_ext_blocks = ext_blocks   # save the initial value in case of reset
 
     # for extended Infomax
     if extended is True:
-        signs = np.identity(n_features)
-        signs.flat[slice(0, n_features * n_subgauss, n_features)]
+        signs = np.ones(n_features)
+
+        for k in range(n_subgauss):
+            signs[k] = -1
+
         kurt_size = min(kurt_size, n_samples)
         old_kurt = np.zeros(n_features, dtype=np.float64)
-        oldsigns = np.zeros((n_features, n_features))
+        oldsigns = np.zeros(n_features)
 
     # trainings loop
     olddelta, oldchange = 1., 0.
     while step < max_iter:
 
         # shuffle data at each step
-        rng.seed(step)  # --> permutation is fixed but differs at each step
-        permute = list(range(n_samples))
-        rng.shuffle(permute)
+        permute = random_permutation(n_samples, rng)
 
         # ICA training block
         # loop across block samples
@@ -150,19 +177,24 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
                 # extended ICA update
                 y = np.tanh(u)
                 weights += l_rate * np.dot(weights,
-                                           BI - np.dot(np.dot(u.T, y), signs) -
+                                           BI -
+                                           signs[None, :] * np.dot(u.T, y) -
                                            np.dot(u.T, u))
-                bias += l_rate * np.reshape(np.sum(y, axis=0,
-                                            dtype=np.float64) * -2.0,
-                                            (n_features, 1))
+                if use_bias:
+                    bias += l_rate * np.reshape(np.sum(y, axis=0,
+                                                dtype=np.float64) * -2.0,
+                                                (n_features, 1))
 
             else:
                 # logistic ICA weights update
                 y = 1.0 / (1.0 + np.exp(-u))
                 weights += l_rate * np.dot(weights,
                                            BI + np.dot(u.T, (1.0 - 2.0 * y)))
-                bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
-                                            dtype=np.float64), (n_features, 1))
+
+                if use_bias:
+                    bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
+                                                dtype=np.float64),
+                                                (n_features, 1))
 
             # check change limit
             max_weight_val = np.max(np.abs(weights))
@@ -176,9 +208,8 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
             # ICA kurtosis estimation
             if extended is True:
 
-                n = np.fix(blockno / ext_blocks)
+                if ext_blocks > 0 and blockno % ext_blocks == 0:
 
-                if np.abs(n) * ext_blocks == blockno:
                     if kurt_size < n_samples:
                         rp = np.floor(rng.uniform(0, 1, kurt_size) *
                                       (n_samples - 1))
@@ -195,11 +226,9 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
                         old_kurt = kurt
 
                     # estimate weighted signs
-                    signs.flat[::n_features + 1] = ((kurt + signsbias) /
-                                                    np.abs(kurt + signsbias))
+                    signs = np.sign(kurt + signsbias)
 
-                    ndiff = ((signs.flat[::n_features + 1] -
-                              oldsigns.flat[::n_features + 1]) != 0).sum()
+                    ndiff = (signs - oldsigns != 0).sum()
                     if ndiff == 0:
                         signcount += 1
                     else:
@@ -219,11 +248,16 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
             angledelta = 0.0
             delta = oldwtchange.reshape(1, n_features_square)
             change = np.sum(delta * delta, dtype=np.float64)
-            if step > 1:
+            if step > 2:
                 angledelta = math.acos(np.sum(delta * olddelta) /
                                        math.sqrt(change * oldchange))
                 angledelta *= degconst
 
+            if verbose:
+                logger.info(
+                    'step %d - lrate %5f, wchange %8.8f, angledelta %4.1f deg'
+                    % (step, l_rate, change, angledelta))
+
             # anneal learning rate
             oldweights = weights.copy()
             if angledelta > anneal_deg:
@@ -236,9 +270,11 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
                 if step == 1:  # on first step only
                     olddelta = delta  # initialize
                     oldchange = change
-                count_small_angle += 1
-                if count_small_angle > n_small_angle:
-                    max_iter = step
+
+                if n_small_angle is not None:
+                    count_small_angle += 1
+                    if count_small_angle > n_small_angle:
+                        max_iter = step
 
             # apply stopping rule
             if step > 2 and change < w_change:
@@ -258,11 +294,14 @@ def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
             olddelta = np.zeros((1, n_features_square), dtype=np.float64)
             bias = np.zeros((n_features, 1), dtype=np.float64)
 
+            ext_blocks = initial_ext_blocks
+
             # for extended Infomax
             if extended:
-                signs = np.identity(n_features)
-                signs.flat[slice(0, n_features * n_subgauss, n_features)]
-                oldsigns = np.zeros((n_features, n_features))
+                signs = np.ones(n_features)
+                for k in range(n_subgauss):
+                    signs[k] = -1
+                oldsigns = np.zeros(n_features)
 
             if l_rate > min_l_rate:
                 if verbose:
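As a usage sketch of the new keyword arguments introduced above (random data; the parameter values are illustrative, not recommendations):

    import numpy as np
    from mne.preprocessing.infomax_ import infomax

    rng = np.random.RandomState(42)
    data = rng.randn(1000, 8)  # shape (n_samples, n_features)
    unmixing = infomax(data, extended=True, random_state=42,
                       blowup=1e4, blowup_fac=0.5,
                       n_small_angle=None,  # disable the small-angle stop rule
                       use_bias=True)
    print(unmixing.shape)  # (8, 8)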
diff --git a/mne/preprocessing/maxfilter.py b/mne/preprocessing/maxfilter.py
index 8b6ce1e..2d76a44 100644
--- a/mne/preprocessing/maxfilter.py
+++ b/mne/preprocessing/maxfilter.py
@@ -7,84 +7,12 @@
 from ..externals.six import string_types
 import os
 from warnings import warn
-import logging
 
-import numpy as np
-from scipy import optimize, linalg
 
+from ..bem import fit_sphere_to_headshape
 from ..io import Raw
-from ..io.constants import FIFF
 from ..utils import logger, verbose
 from ..externals.six.moves import map
-from ..externals.six.moves import zip
-
-
-@verbose
-def fit_sphere_to_headshape(info, verbose=None):
-    """ Fit a sphere to the headshape points to determine head center for
-        maxfilter.
-
-    Parameters
-    ----------
-    info : dict
-        Measurement info.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Returns
-    -------
-    radius : float
-        Sphere radius in mm.
-    origin_head: ndarray
-        Head center in head coordinates (mm).
-    origin_device: ndarray
-        Head center in device coordinates (mm).
-
-    """
-    # get head digization points, excluding some frontal points (nose etc.)
-    hsp = [p['r'] for p in info['dig'] if p['kind'] == FIFF.FIFFV_POINT_EXTRA
-           and not (p['r'][2] < 0 and p['r'][1] > 0)]
-
-    if len(hsp) == 0:
-        raise ValueError('No head digitization points found')
-
-    hsp = 1e3 * np.array(hsp)
-
-    # initial guess for center and radius
-    xradius = (np.max(hsp[:, 0]) - np.min(hsp[:, 0])) / 2
-    yradius = (np.max(hsp[:, 1]) - np.min(hsp[:, 1])) / 2
-
-    radius_init = (xradius + yradius) / 2
-    center_init = np.array([0.0, 0.0, np.max(hsp[:, 2]) - radius_init])
-
-    # optimization
-    x0 = np.r_[center_init, radius_init]
-    cost_fun = lambda x, hsp:\
-        np.sum((np.sqrt(np.sum((hsp - x[:3]) ** 2, axis=1)) - x[3]) ** 2)
-
-    disp = True if logger.level <= logging.INFO else False
-    x_opt = optimize.fmin_powell(cost_fun, x0, args=(hsp,), disp=disp)
-
-    origin_head = x_opt[:3]
-    radius = x_opt[3]
-
-    # compute origin in device coordinates
-    trans = info['dev_head_t']
-    if trans['from'] != FIFF.FIFFV_COORD_DEVICE\
-        or trans['to'] != FIFF.FIFFV_COORD_HEAD:
-            raise RuntimeError('device to head transform not found')
-
-    head_to_dev = linalg.inv(trans['trans'])
-    origin_device = 1e3 * np.dot(head_to_dev,
-                                 np.r_[1e-3 * origin_head, 1.0])[:3]
-
-    logger.info('Fitted sphere: r = %0.1f mm' % radius)
-    logger.info('Origin head coordinates: %0.1f %0.1f %0.1f mm' %
-                (origin_head[0], origin_head[1], origin_head[2]))
-    logger.info('Origin device coordinates: %0.1f %0.1f %0.1f mm' %
-                (origin_device[0], origin_device[1], origin_device[2]))
-
-    return radius, origin_head, origin_device
 
 
 def _mxwarn(msg):
@@ -287,7 +215,11 @@ def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
         os.remove(out_fname)
 
     logger.info('Running MaxFilter: %s ' % cmd)
-    st = os.system(cmd)
+    if os.getenv('_MNE_MAXFILTER_TEST', '') != 'true':  # fake maxfilter
+        st = os.system(cmd)
+    else:
+        print(cmd)  # we can check the output
+        st = 0
     if st != 0:
         raise RuntimeError('MaxFilter returned non-zero exit status %d' % st)
     logger.info('[done]')
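The environment-variable guard added above lets tests exercise the command construction without a MaxFilter installation; a test could do something like this (hypothetical sketch):

    import os
    os.environ['_MNE_MAXFILTER_TEST'] = 'true'  # print the command instead of running it
    # apply_maxfilter(in_fname, out_fname, ...) now prints cmd and reports success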
diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py
new file mode 100644
index 0000000..51d3a4d
--- /dev/null
+++ b/mne/preprocessing/maxwell.py
@@ -0,0 +1,644 @@
+# Authors: Mark Wronkiewicz <wronk.mark at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#          Jussi Nurminen <jnu at iki.fi>
+
+
+# License: BSD (3-clause)
+
+from __future__ import division
+import numpy as np
+from scipy import linalg
+from math import factorial
+import inspect
+
+from .. import pick_types
+from ..forward._compute_forward import _concatenate_coils
+from ..forward._make_forward import _prep_meg_channels
+from ..io.write import _generate_meas_id, _date_now
+from ..utils import verbose, logger
+
+
+@verbose
+def _maxwell_filter(raw, origin=(0, 0, 40), int_order=8, ext_order=3,
+                    st_dur=None, st_corr=0.98, verbose=None):
+    """Apply Maxwell filter to data using spherical harmonics.
+
+    Parameters
+    ----------
+    raw : instance of mne.io.Raw
+        Data to be filtered
+    origin : array-like, shape (3,)
+        Origin of internal and external multipolar moment space in head coords
+        and in millimeters
+    int_order : int
+        Order of internal component of spherical expansion
+    ext_order : int
+        Order of external component of spherical expansion
+    st_dur : float | None
+        If not None, apply spatiotemporal SSS with specified buffer duration
+        (in seconds). Elekta's default is 10.0 seconds in MaxFilter v2.2.
+        Spatiotemporal SSS acts implicitly as a high-pass filter whose
+        cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
+        buffers are generally better as long as your system can handle the
+        higher memory usage. To ensure that each window is processed
+        identically, choose a buffer length that divides evenly into your data.
+        Any data at the trailing edge that doesn't fit evenly into a whole
+        buffer window will be lumped into the previous buffer.
+    st_corr : float
+        Correlation limit between inner and outer subspaces used to reject
+        overlapping inner/outer signals during spatiotemporal SSS.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose)
+
+    Returns
+    -------
+    raw_sss : instance of mne.io.Raw
+        The raw data with Maxwell filtering applied
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    Equation numbers refer to Taulu and Kajola, 2005 [1]_ unless otherwise
+    noted.
+
+    Some of this code was adapted and relicensed (with BSD form) with
+    permission from Jussi Nurminen.
+
+    References
+    ----------
+    .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
+           multichannel data: The signal space separation method,"
+           Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
+
+           http://lib.tkk.fi/Diss/2008/isbn9789512295654/article2.pdf
+
+    .. [2] Taulu S. and Simola J. "Spatiotemporal signal space separation
+           method for rejecting nearby interference in MEG measurements,"
+           Physics in Medicine and Biology, vol. 51, pp. 1759-1768, 2006.
+
+           http://lib.tkk.fi/Diss/2008/isbn9789512295654/article3.pdf
+    """
+
+    # There are an absurd number of different possible notations for spherical
+    # coordinates, which confounds the notation for spherical harmonics.  Here,
+    # we purposefully stay away from shorthand notation in both and use
+    # explicit terms (like 'azimuth' and 'polar') to avoid confusion.
+    # See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
+    # Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
+
+    if raw.proj:
+        raise RuntimeError('Projectors cannot be applied to raw data.')
+    if len(raw.info.get('comps', [])) > 0:
+        raise RuntimeError('Maxwell filter cannot handle compensated '
+                           'channels.')
+    st_corr = float(st_corr)
+    if st_corr <= 0. or st_corr > 1.:
+        raise ValueError('Need 0 < st_corr <= 1., got %s' % st_corr)
+    logger.info('Bad channels being reconstructed: ' + str(raw.info['bads']))
+
+    logger.info('Preparing coil definitions')
+    all_coils, _, _, meg_info = _prep_meg_channels(raw.info, accurate=True,
+                                                   elekta_defs=True,
+                                                   verbose=False)
+    raw_sss = raw.copy().load_data()
+    del raw
+    times = raw_sss.times
+
+    # Get indices of channels to use in multipolar moment calculation
+    good_chs = pick_types(raw_sss.info, meg=True, exclude='bads')
+    # Get indices of MEG channels
+    meg_picks = pick_types(raw_sss.info, meg=True, exclude=[])
+    meg_coils, _, _, meg_info = _prep_meg_channels(raw_sss.info, accurate=True,
+                                                   elekta_defs=True)
+
+    # Magnetometers (with coil_class == 1.0) must be scaled by 100 to improve
+    # numerical stability as they have different scales than gradiometers
+    coil_scale = np.ones((len(meg_coils), 1))
+    coil_scale[np.array([coil['coil_class'] == 1.0
+                         for coil in meg_coils])] = 100.
+
+    # Compute multipolar moment bases
+    origin = np.array(origin) / 1000.  # Convert scale from mm to m
+    # Compute in/out bases and create copies containing only good chs
+    S_in, S_out = _sss_basis(origin, meg_coils, int_order, ext_order)
+    n_in = S_in.shape[1]
+
+    S_in_good, S_out_good = S_in[good_chs, :], S_out[good_chs, :]
+    S_in_good_norm = np.sqrt(np.sum(S_in_good * S_in_good, axis=0))[:,
+                                                                    np.newaxis]
+    S_out_good_norm = \
+        np.sqrt(np.sum(S_out_good * S_out_good, axis=0))[:, np.newaxis]
+    # Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
+    S_tot_good = np.c_[S_in_good, S_out_good]
+    S_tot_good /= np.sqrt(np.sum(S_tot_good * S_tot_good, axis=0))[np.newaxis,
+                                                                   :]
+    pS_tot_good = linalg.pinv(S_tot_good, cond=1e-15)
+
+    # Compute multipolar moments of (magnetometer scaled) data (Eq. 37)
+    # XXX eventually we can refactor this to work in chunks
+    data = raw_sss[good_chs][0]
+    mm = np.dot(pS_tot_good, data * coil_scale[good_chs])
+    # Reconstruct data from internal space (Eq. 38)
+    raw_sss._data[meg_picks] = np.dot(S_in, mm[:n_in] / S_in_good_norm)
+    raw_sss._data[meg_picks] /= coil_scale
+
+    # Reset 'bads' for any MEG channels since they've been reconstructed
+    bad_inds = [raw_sss.info['ch_names'].index(ch)
+                for ch in raw_sss.info['bads']]
+    raw_sss.info['bads'] = [raw_sss.info['ch_names'][bi] for bi in bad_inds
+                            if bi not in meg_picks]
+
+    # Reconstruct raw file object with spatiotemporal processed data
+    if st_dur is not None:
+        if st_dur > times[-1]:
+            raise ValueError('st_dur (%0.1fs) longer than length of signal in '
+                             'raw (%0.1fs).' % (st_dur, times[-1]))
+        logger.info('Processing data using tSSS with st_dur=%s' % st_dur)
+
+        # Generate time points to break up data into windows
+        lims = raw_sss.time_as_index(np.arange(times[0], times[-1], st_dur))
+        len_last_buf = raw_sss.times[-1] - raw_sss.index_as_time(lims[-1])[0]
+        if len_last_buf == st_dur:
+            lims = np.concatenate([lims, [len(raw_sss.times)]])
+        else:
+            # len_last_buf < st_dur so fold it into the previous buffer
+            lims[-1] = len(raw_sss.times)
+            logger.info('Spatiotemporal window did not fit evenly into raw '
+                        'object. The final %0.2f seconds were lumped onto '
+                        'the previous window.' % len_last_buf)
+
+        # Loop through buffer windows of data
+        for win in zip(lims[:-1], lims[1:]):
+            # Reconstruct data from external space and compute residual
+            resid = data[:, win[0]:win[1]]
+            resid -= raw_sss._data[meg_picks, win[0]:win[1]]
+            resid -= np.dot(S_out, mm[n_in:, win[0]:win[1]] /
+                            S_out_good_norm) / coil_scale
+            _check_finite(resid)
+
+            # Compute SSP-like projector, using st_corr as the overlap limit
+            this_data = raw_sss._data[meg_picks, win[0]:win[1]]
+            _check_finite(this_data)
+            V = _overlap_projector(this_data, resid, st_corr)
+
+            # Apply projector according to Eq. 12 in [2]_
+            logger.info('    Projecting out %s tSSS components for %s-%s'
+                        % (V.shape[1], win[0] / raw_sss.info['sfreq'],
+                           win[1] / raw_sss.info['sfreq']))
+            this_data -= np.dot(np.dot(this_data, V), V.T)
+            raw_sss._data[meg_picks, win[0]:win[1]] = this_data
+
+    # Update info
+    raw_sss = _update_sss_info(raw_sss, origin, int_order, ext_order,
+                               len(good_chs))
+
+    return raw_sss
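The buffer bookkeeping above (cut the recording into st_dur windows and fold a short trailing remainder into the previous window) can be sketched with plain sample indices. This is a sketch assuming a fixed sampling rate; the real code goes through Raw.time_as_index instead:

    import numpy as np

    sfreq, n_times, st_dur = 1000., 25500, 10.0   # 25.5 s of data, 10 s buffers
    buf = int(st_dur * sfreq)
    lims = np.arange(0, n_times, buf)             # window start samples
    lims = np.concatenate([lims, [n_times]])
    if lims[-1] - lims[-2] < buf:                 # trailing buffer too short:
        lims = np.delete(lims, -2)                # fold it into the previous one
    print(list(zip(lims[:-1], lims[1:])))         # [(0, 10000), (10000, 25500)]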
+
+
+def _check_finite(data):
+    """Helper to ensure data is finite"""
+    if not np.isfinite(data).all():
+        raise RuntimeError('data contains non-finite numbers')
+
+
+def _sph_harm(order, degree, az, pol):
+    """Evaluate point in specified multipolar moment. [1]_ Equation 4.
+
+    When using, pay close attention to inputs. The notation for order/degree
+    and for theta/phi is reversed in the original SSS work compared to many
+    other sources. See mathworld.wolfram.com/SphericalHarmonic.html for
+    more discussion.
+
+    Note that scipy has ``scipy.special.sph_harm``, but that function is
+    too slow on old versions (< 0.15) and has a weird bug on newer versions.
+    At some point we should track it down and open a bug report...
+
+    Parameters
+    ----------
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    az : float
+        Azimuthal (longitudinal) spherical coordinate [0, 2*pi]. 0 is aligned
+        with x-axis.
+    pol : float
+        Polar (or colatitudinal) spherical coordinate [0, pi]. 0 is aligned
+        with z-axis.
+
+    Returns
+    -------
+    base : complex float
+        The spherical harmonic value at the specified azimuth and polar angles
+    """
+    from scipy.special import lpmv
+
+    # Error checks
+    if np.abs(order) > degree:
+        raise ValueError('Absolute value of expansion coefficient must be <= '
+                         'degree')
+    # Ensure that polar and azimuth angles are arrays
+    az = np.asarray(az)
+    pol = np.asarray(pol)
+    if (az < -2 * np.pi).any() or (az > 2 * np.pi).any():
+        raise ValueError('Azimuth coords must lie in [-2*pi, 2*pi]')
+    if (pol < 0).any() or (pol > np.pi).any():
+        raise ValueError('Polar coords must lie in [0, pi]')
+
+    base = np.sqrt((2 * degree + 1) / (4 * np.pi) * factorial(degree - order) /
+                   factorial(degree + order)) * \
+        lpmv(order, degree, np.cos(pol)) * np.exp(1j * order * az)
+    return base
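Assuming a SciPy version where ``sph_harm`` is well behaved (see the caveat in the docstring), the two should agree for typical inputs, since the code follows the same argument convention:

    import numpy as np
    from scipy.special import sph_harm

    order, degree = 2, 3
    az, pol = 0.7, 1.1                       # azimuth, polar angle
    ref = sph_harm(order, degree, az, pol)   # SciPy: sph_harm(m, n, theta, phi)
    print(np.allclose(_sph_harm(order, degree, az, pol), ref))  # expect True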
+
+
+def _sss_basis(origin, coils, int_order, ext_order):
+    """Compute SSS basis for given conditions.
+
+    Parameters
+    ----------
+    origin : ndarray, shape (3,)
+        Origin of the multipolar moment space in millimeters
+    coils : list
+        List of MEG coils. Each should contain coil information dict. All
+        position info must be in the same coordinate frame as 'origin'
+    int_order : int
+        Order of the internal multipolar moment space
+    ext_order : int
+        Order of the external multipolar moment space
+
+    Returns
+    -------
+    bases : tuple, len (2)
+        Internal and external basis sets as ndarrays with shape
+        (n_coils, n_mult_moments)
+    """
+    r_int_pts, ncoils, wcoils, counts = _concatenate_coils(coils)
+    bins = np.repeat(np.arange(len(counts)), counts)
+    n_sens = len(counts)
+    n_bases = get_num_moments(int_order, ext_order)
+    # int_lens = np.insert(np.cumsum(counts), obj=0, values=0)
+
+    S_in = np.empty((n_sens, (int_order + 1) ** 2 - 1))
+    S_out = np.empty((n_sens, (ext_order + 1) ** 2 - 1))
+    S_in.fill(np.nan)
+    S_out.fill(np.nan)
+
+    # Set all magnetometers (with 'coil_class' == 1.0) to be scaled by 100
+    coil_scale = np.ones((len(coils)))
+    coil_scale[np.array([coil['coil_class'] == 1.0 for coil in coils])] = 100.
+
+    if n_bases > n_sens:
+        raise ValueError('Number of requested bases (%s) exceeds number of '
+                         'sensors (%s)' % (str(n_bases), str(n_sens)))
+
+    # Compute position vector between origin and coil integration pts
+    cvec_cart = r_int_pts - origin[np.newaxis, :]
+    # Convert points to spherical coordinates
+    cvec_sph = _cart_to_sph(cvec_cart)
+
+    # Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
+    for spc, g_func, order in zip([S_in, S_out],
+                                  [_grad_in_components, _grad_out_components],
+                                  [int_order, ext_order]):
+        for deg in range(1, order + 1):
+            for order in range(-deg, deg + 1):
+
+                # Compute gradient for all integration points
+                grads = -1 * g_func(deg, order, cvec_sph[:, 0], cvec_sph[:, 1],
+                                    cvec_sph[:, 2])
+
+                # Gradients dotted with integration point normals and weighted
+                all_grads = wcoils * np.einsum('ij,ij->i', grads, ncoils)
+
+                # For order and degree, sum over each sensor's integration pts
+                # for pt_i in range(0, len(int_lens) - 1):
+                #    int_pts_sum = \
+                #        np.sum(all_grads[int_lens[pt_i]:int_lens[pt_i + 1]])
+                #    spc[pt_i, deg ** 2 + deg + order - 1] = int_pts_sum
+                spc[:, deg ** 2 + deg + order - 1] = \
+                    np.bincount(bins, weights=all_grads, minlength=len(counts))
+
+        # Scale magnetometers
+        spc *= coil_scale[:, np.newaxis]
+
+    return S_in, S_out
+
+
+def _alegendre_deriv(degree, order, val):
+    """Compute the derivative of the associated Legendre polynomial at a value.
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    val : float
+        Value to evaluate the derivative at
+
+    Returns
+    -------
+    dPlm : float
+        Associated Legendre function derivative
+    """
+    from scipy.special import lpmv
+
+    C = 1
+    if order < 0:
+        order = abs(order)
+        C = (-1) ** order * factorial(degree - order) / factorial(degree +
+                                                                  order)
+    return C * (order * val * lpmv(order, degree, val) + (degree + order) *
+                (degree - order + 1) * np.sqrt(1 - val ** 2) *
+                lpmv(order - 1, degree, val)) / (1 - val ** 2)
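One way to sanity-check the closed-form derivative above is against a central finite difference of ``scipy.special.lpmv`` (a sketch with a loose tolerance):

    import numpy as np
    from scipy.special import lpmv

    degree, order, val, h = 4, 2, 0.3, 1e-6
    numeric = (lpmv(order, degree, val + h) -
               lpmv(order, degree, val - h)) / (2 * h)
    print(np.allclose(_alegendre_deriv(degree, order, val), numeric,
                      rtol=1e-4))  # should print True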
+
+
+def _grad_in_components(degree, order, rad, az, pol):
+    """Compute gradient of internal component of V(r) spherical expansion.
+
+    Internal component has form: Ylm(pol, az) / (rad ** (degree + 1))
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    rad : ndarray, shape (n_samples,)
+        Array of radii
+    az : ndarray, shape (n_samples,)
+        Array of azimuthal (longitudinal) spherical coordinates [0, 2*pi]. 0 is
+        aligned with x-axis.
+    pol : ndarray, shape (n_samples,)
+        Array of polar (or colatitudinal) spherical coordinates [0, pi]. 0 is
+        aligned with z-axis.
+
+    Returns
+    -------
+    grads : ndarray, shape (n_samples, 3)
+        Gradient of the spherical harmonic and vector specified in rectangular
+        coordinates
+    """
+    # Compute gradients for all spherical coordinates (Eq. 6)
+    g_rad = (-(degree + 1) / rad ** (degree + 2) *
+             _sph_harm(order, degree, az, pol))
+
+    g_az = (1 / (rad ** (degree + 2) * np.sin(pol)) * 1j * order *
+            _sph_harm(order, degree, az, pol))
+
+    g_pol = (1 / rad ** (degree + 2) *
+             np.sqrt((2 * degree + 1) * factorial(degree - order) /
+                     (4 * np.pi * factorial(degree + order))) *
+             np.sin(-pol) * _alegendre_deriv(degree, order, np.cos(pol)) *
+             np.exp(1j * order * az))
+
+    # Get real component of vectors, convert to cartesian coords, and return
+    real_grads = _get_real_grad(np.c_[g_rad, g_az, g_pol], order)
+    return _sph_to_cart_partials(np.c_[rad, az, pol], real_grads)
+
+
+def _grad_out_components(degree, order, rad, az, pol):
+    """Compute gradient of external component of V(r) spherical expansion.
+
+    External component has form: Ylm(azimuth, polar) * (radius ** degree)
+
+    Parameters
+    ----------
+    degree : int
+        Degree of spherical harmonic. (Usually) corresponds to 'l'
+    order : int
+        Order of spherical harmonic. (Usually) corresponds to 'm'
+    rad : ndarray, shape (n_samples,)
+        Array of radii
+    az : ndarray, shape (n_samples,)
+        Array of azimuthal (longitudinal) spherical coordinates [0, 2*pi]. 0 is
+        aligned with x-axis.
+    pol : ndarray, shape (n_samples,)
+        Array of polar (or colatitudinal) spherical coordinates [0, pi]. 0 is
+        aligned with z-axis.
+
+    Returns
+    -------
+    grads : ndarray, shape (n_samples, 3)
+        Gradient of the spherical harmonic and vector specified in rectangular
+        coordinates
+    """
+    # Compute gradients for all spherical coordinates (Eq. 7)
+    g_rad = degree * rad ** (degree - 1) * _sph_harm(order, degree, az, pol)
+
+    g_az = (rad ** (degree - 1) / np.sin(pol) * 1j * order *
+            _sph_harm(order, degree, az, pol))
+
+    g_pol = (rad ** (degree - 1) *
+             np.sqrt((2 * degree + 1) * factorial(degree - order) /
+                     (4 * np.pi * factorial(degree + order))) *
+             np.sin(-pol) * _alegendre_deriv(degree, order, np.cos(pol)) *
+             np.exp(1j * order * az))
+
+    # Get real component of vectors, convert to cartesian coords, and return
+    real_grads = _get_real_grad(np.c_[g_rad, g_az, g_pol], order)
+    return _sph_to_cart_partials(np.c_[rad, az, pol], real_grads)
+
+
+def _get_real_grad(grad_vec_raw, order):
+    """Helper function to convert gradient vector to to real basis functions.
+
+    Parameters
+    ----------
+    grad_vec_raw : ndarray, shape (n_gradients, 3)
+        Gradient array with columns for radius, azimuth, polar points
+    order : int
+        Order (usually 'm') of multipolar moment.
+
+    Returns
+    -------
+    grad_vec : ndarray, shape (n_gradients, 3)
+        Gradient vectors with only the real component
+    """
+
+    if order > 0:
+        grad_vec = np.sqrt(2) * np.real(grad_vec_raw)
+    elif order < 0:
+        grad_vec = np.sqrt(2) * np.imag(grad_vec_raw)
+    else:
+        grad_vec = grad_vec_raw
+
+    return np.real(grad_vec)
+
+
+def get_num_moments(int_order, ext_order):
+    """Compute total number of multipolar moments. Equivalent to [1]_ Eq. 32.
+
+    Parameters
+    ----------
+    int_order : int
+        Internal expansion order
+    ext_order : int
+        External expansion order
+
+    Returns
+    -------
+    M : int
+        Total number of multipolar moments
+    """
+
+    # TODO: Eventually, reuse code in field_interpolation
+
+    return int_order ** 2 + 2 * int_order + ext_order ** 2 + 2 * ext_order
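Each expansion order L contributes (L + 1) ** 2 - 1 moments (degree 0 is excluded), so for the defaults of _maxwell_filter above this gives:

    print(get_num_moments(int_order=8, ext_order=3))  # 95 = 80 internal + 15 external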
+
+
+def _sph_to_cart_partials(sph_pts, sph_grads):
+    """Convert spherical partial derivatives to cartesian coords.
+
+    Note: Because we are dealing with partial derivatives, this calculation is
+    not a static transformation. The transformation matrix itself is dependent
+    on the azimuth and polar coordinates.
+
+    See the 'Spherical coordinate system' section here:
+    wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
+
+    Parameters
+    ----------
+    sph_pts : ndarray, shape (n_points, 3)
+        Array containing spherical coordinates points (rad, azimuth, polar)
+    sph_grads : ndarray, shape (n_points, 3)
+        Array containing partial derivatives at each spherical coordinate
+
+    Returns
+    -------
+    cart_grads : ndarray, shape (n_points, 3)
+        Array containing partial derivatives in Cartesian coordinates (x, y, z)
+    """
+
+    cart_grads = np.zeros_like(sph_grads)
+    c_as, s_as = np.cos(sph_pts[:, 1]), np.sin(sph_pts[:, 1])
+    c_ps, s_ps = np.cos(sph_pts[:, 2]), np.sin(sph_pts[:, 2])
+    trans = np.array([[c_as * s_ps, -s_as, c_as * c_ps],
+                      [s_as * s_ps, c_as, c_ps * s_as],
+                      [c_ps, np.zeros_like(c_as), -s_ps]])
+    cart_grads = np.einsum('ijk,kj->ki', trans, sph_grads)
+    return cart_grads
+
+
+def _cart_to_sph(cart_pts):
+    """Convert Cartesian coordinates to spherical coordinates.
+
+    Parameters
+    ----------
+    cart_pts : ndarray, shape (n_points, 3)
+        Array containing points in Cartesian coordinates (x, y, z)
+
+    Returns
+    -------
+    sph_pts : ndarray, shape (n_points, 3)
+        Array containing points in spherical coordinates (rad, azimuth, polar)
+    """
+
+    rad = np.sqrt(np.sum(cart_pts * cart_pts, axis=1))
+    az = np.arctan2(cart_pts[:, 1], cart_pts[:, 0])
+    pol = np.arccos(cart_pts[:, 2] / rad)
+
+    return np.c_[rad, az, pol]
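A round-trip sketch for this helper (points chosen away from the z-axis, where the polar angle is well defined):

    import numpy as np

    pts = np.array([[1., 2., 3.], [-2., 0.5, 1.]])
    rad, az, pol = _cart_to_sph(pts).T        # columns: radius, azimuth, polar
    back = np.c_[rad * np.sin(pol) * np.cos(az),
                 rad * np.sin(pol) * np.sin(az),
                 rad * np.cos(pol)]
    print(np.allclose(pts, back))  # True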
+
+
+def _update_sss_info(raw, origin, int_order, ext_order, nsens):
+    """Helper function to update info after Maxwell filtering.
+
+    Parameters
+    ----------
+    raw : instance of mne.io.Raw
+        Data to be filtered
+    origin : array-like, shape (3,)
+        Origin of internal and external multipolar moment space in head coords
+        and in millimeters
+    int_order : int
+        Order of internal component of spherical expansion
+    ext_order : int
+        Order of external component of spherical expansion
+    nsens : int
+        Number of sensors
+
+    Returns
+    -------
+    raw : mne.io.Raw
+        raw file object with raw.info modified
+    """
+    from .. import __version__
+    # TODO: Continue to fill out bookkeeping info as additional features
+    # are added (fine calibration, cross-talk calibration, etc.)
+    int_moments = get_num_moments(int_order, 0)
+    ext_moments = get_num_moments(0, ext_order)
+
+    raw.info['maxshield'] = False
+    sss_info_dict = dict(in_order=int_order, out_order=ext_order,
+                         nsens=nsens, origin=origin.astype('float32'),
+                         n_int_moments=int_moments,
+                         frame=raw.info['dev_head_t']['to'],
+                         components=np.ones(int_moments +
+                                            ext_moments).astype('int32'))
+
+    max_info_dict = dict(max_st={}, sss_cal={}, sss_ctc={},
+                         sss_info=sss_info_dict)
+
+    block_id = _generate_meas_id()
+    proc_block = dict(max_info=max_info_dict, block_id=block_id,
+                      creator='mne-python v%s' % __version__,
+                      date=_date_now(), experimentor='')
+
+    # Insert information in raw.info['proc_info']
+    raw.info['proc_history'] = [proc_block] + raw.info.get('proc_history', [])
+    return raw
+
+
+check_disable = dict()  # not available on really old versions of SciPy
+if 'check_finite' in inspect.getargspec(linalg.svd)[0]:
+    check_disable['check_finite'] = False
+
+
+def _orth_overwrite(A):
+    """Helper to create a slightly more efficient 'orth'"""
+    # adapted from scipy/linalg/decomp_svd.py
+    u, s = linalg.svd(A, overwrite_a=True, full_matrices=False,
+                      **check_disable)[:2]
+    M, N = A.shape
+    eps = np.finfo(float).eps
+    tol = max(M, N) * np.amax(s) * eps
+    num = np.sum(s > tol, dtype=int)
+    return u[:, :num]
+
+
+def _overlap_projector(data_int, data_res, corr):
+    """Calculate projector for removal of subspace intersection in tSSS"""
+    # corr necessary to deal with noise when finding identical signal
+    # directions in the subspace. See the end of the Results section in [2]_
+
+    # Note that the procedure here is an updated version of [2]_ (and used in
+    # Elekta's tSSS) that uses residuals instead of internal/external spaces
+    # directly. This provides more degrees of freedom when analyzing for
+    # intersections between internal and external spaces.
+
+    # Normalize data, then compute orth to get temporal bases. Matrices
+    # must have shape (n_samps x effective_rank) when passed into svd
+    # computation
+    Q_int = linalg.qr(_orth_overwrite((data_int / np.linalg.norm(data_int)).T),
+                      overwrite_a=True, mode='economic', **check_disable)[0].T
+    Q_res = linalg.qr(_orth_overwrite((data_res / np.linalg.norm(data_res)).T),
+                      overwrite_a=True, mode='economic', **check_disable)[0]
+    assert data_int.shape[1] > 0
+    C_mat = np.dot(Q_int, Q_res)
+    del Q_int
+
+    # Compute angles between subspace and which bases to keep
+    S_intersect, Vh_intersect = linalg.svd(C_mat, overwrite_a=True,
+                                           full_matrices=False,
+                                           **check_disable)[1:]
+    del C_mat
+    intersect_mask = (S_intersect >= corr)
+    del S_intersect
+
+    # Compute projection operator as (I-LL_T) Eq. 12 in [2]_
+    # V_principal should be shape (n_time_pts x n_retained_inds)
+    Vh_intersect = Vh_intersect[intersect_mask].T
+    V_principal = np.dot(Q_res, Vh_intersect)
+    return V_principal
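The construction above is the classic principal-angles computation: orthonormalize both temporal bases, take the SVD of their cross-product, and keep the directions whose singular values (cosines of the principal angles) exceed the correlation limit. A minimal sketch with random data (which will typically share no direction above the limit):

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    Q_a = linalg.qr(rng.randn(50, 5), mode='economic')[0]  # (n_times, rank)
    Q_b = linalg.qr(rng.randn(50, 5), mode='economic')[0]
    s, Vh = linalg.svd(np.dot(Q_a.T, Q_b), full_matrices=False)[1:]
    V_principal = np.dot(Q_b, Vh[s >= 0.98].T)  # same role as st_corr
    print(V_principal.shape)                    # likely (50, 0) for random data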
diff --git a/mne/preprocessing/peak_finder.py b/mne/preprocessing/peak_finder.py
index 052b3bf..a2e78fb 100644
--- a/mne/preprocessing/peak_finder.py
+++ b/mne/preprocessing/peak_finder.py
@@ -107,8 +107,8 @@ def peak_finder(x0, thresh=None, extrema=1, verbose=None):
             ii += 1  # This is a peak
             # Reset peak finding if we had a peak and the next peak is bigger
             # than the last or the left min was small enough to reset.
-            if found_peak and ((x[ii] > peak_mag[-1])
-                               or (left_min < peak_mag[-1] - thresh)):
+            if found_peak and ((x[ii] > peak_mag[-1]) or
+                               (left_min < peak_mag[-1] - thresh)):
                 temp_mag = min_mag
                 found_peak = False
 
diff --git a/mne/preprocessing/ssp.py b/mne/preprocessing/ssp.py
index f8abc73..63fac16 100644
--- a/mne/preprocessing/ssp.py
+++ b/mne/preprocessing/ssp.py
@@ -35,8 +35,7 @@ def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
                       filter_method, iir_params=None, verbose=None):
     """Compute SSP/PCA projections for ECG or EOG artifacts
 
-    Note: raw has to be constructed with preload=True (or string)
-    Warning: raw will be modified by this function
+    .. note:: raw data must be preloaded.
 
     Parameters
     ----------
@@ -68,9 +67,9 @@ def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
         Number of jobs to run in parallel.
     ch_name : string (or None)
         Channel to use for ECG event detection.
-    reject : dict
+    reject : dict | None
         Epoch rejection configuration (see Epochs).
-    flat : dict
+    flat : dict | None
         Epoch flat configuration (see Epochs).
     bads : list
         List with (additional) bad channels.
@@ -221,8 +220,7 @@ def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
                      iir_params=None, copy=True, verbose=None):
     """Compute SSP/PCA projections for ECG artifacts
 
-    Note: raw has to be constructed with preload=True (or string)
-    Warning: raw will be modified by this function
+    .. note:: raw data must be preloaded.
 
     Parameters
     ----------
@@ -252,9 +250,9 @@ def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
         Number of jobs to run in parallel.
     ch_name : string (or None)
         Channel to use for ECG detection (Required if no ECG found).
-    reject : dict
+    reject : dict | None
         Epoch rejection configuration (see Epochs).
-    flat : dict
+    flat : dict | None
         Epoch flat configuration (see Epochs).
     bads : list
         List with (additional) bad channels.
@@ -317,8 +315,7 @@ def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
                      iir_params=None, ch_name=None, copy=True, verbose=None):
     """Compute SSP/PCA projections for EOG artifacts
 
-    Note: raw has to be constructed with preload=True (or string)
-    Warning: raw will be modified by this function
+    .. note:: raw data must be preloaded.
 
     Parameters
     ----------
@@ -342,15 +339,13 @@ def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
         Filter high cut-off frequency in Hz.
     average : bool
         Compute SSP after averaging.
-    preload : string (or True)
-        Temporary file used during computaion.
     filter_length : str | int | None
         Number of taps to use for filtering.
     n_jobs : int
         Number of jobs to run in parallel.
-    reject : dict
+    reject : dict | None
         Epoch rejection configuration (see Epochs).
-    flat : dict
+    flat : dict | None
         Epoch flat configuration (see Epochs).
     bads : list
         List with (additional) bad channels.
@@ -372,10 +367,10 @@ def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
         Dictionary of parameters to use for IIR filtering.
         See mne.filter.construct_iir_filter for details. If iir_params
         is None and method="iir", 4th order Butterworth will be used.
-    copy : bool
-        If False, filtering raw data is done in place. Defaults to True.
     ch_name: str | None
         If not None, specify EOG channel name.
+    copy : bool
+        If False, filtering raw data is done in place. Defaults to True.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
diff --git a/mne/preprocessing/stim.py b/mne/preprocessing/stim.py
index 820830c..06fd200 100644
--- a/mne/preprocessing/stim.py
+++ b/mne/preprocessing/stim.py
@@ -3,67 +3,128 @@
 # License: BSD (3-clause)
 
 import numpy as np
-from scipy import signal, interpolate
+from ..evoked import Evoked
+from ..epochs import Epochs
+from ..io import Raw
+from ..event import find_events
 
-from .. import pick_types
+from ..io.pick import pick_channels
 
 
-def eliminate_stim_artifact(raw, events, event_id, tmin=-0.005,
-                            tmax=0.01, mode='linear'):
-    """Eliminates stimulations artifacts from raw data
+def _get_window(start, end):
+    """Return window which has length as much as parameter start - end"""
+    from scipy.signal import hann
+    window = 1 - np.r_[hann(4)[:2],
+                       np.ones(np.abs(end - start) - 4),
+                       hann(4)[-2:]].T
+    return window
 
-    The raw object will be modified in place (no copy)
+
+def _check_preload(inst):
+    """Check if inst.preload is False. If it is False, raising error"""
+    if inst.preload is False:
+        raise RuntimeError('Modifying data of Instance is only supported '
+                           'when preloading is used. Use preload=True '
+                           '(or string) in the constructor.')
+
+
+def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
+    """Modify original data by using parameter data"""
+    from scipy.interpolate import interp1d
+    if mode == 'linear':
+        x = np.array([first_samp, last_samp])
+        f = interp1d(x, data[:, (first_samp, last_samp)])
+        xnew = np.arange(first_samp, last_samp)
+        interp_data = f(xnew)
+        data[picks, first_samp:last_samp] = interp_data
+    if mode == 'window':
+        data[picks, first_samp:last_samp] = \
+            data[picks, first_samp:last_samp] * window[np.newaxis, :]
+
+
+def fix_stim_artifact(inst, events=None, event_id=None, tmin=0.,
+                      tmax=0.01, mode='linear', stim_channel=None, copy=False):
+    """Eliminate stimulation's artifacts from instance
 
     Parameters
     ----------
-    raw : Raw object
-        raw data object.
+    inst : instance of Raw or Epochs or Evoked
+        The data.
     events : array, shape (n_events, 3)
-        The list of events.
+        The list of events. Required only when inst is Raw.
     event_id : int
         The id of the events generating the stimulation artifacts.
+        If None, read all events. Required only when inst is Raw.
     tmin : float
         Start time of the interpolation window in seconds.
     tmax : float
         End time of the interpolation window in seconds.
     mode : 'linear' | 'window'
-        way to fill the artifacted time interval.
+        Way to fill the artifacted time interval.
         'linear' does linear interpolation
         'window' applies a (1 - hanning) window.
+    stim_channel : str | None
+        Stim channel to use.
+    copy : bool
+        If True, data will be copied. Else data may be modified in place.
 
     Returns
     -------
-    raw: Raw object
-        raw data object.
+    inst : instance of Raw or Evoked or Epochs
+        Instance with modified data
     """
-    if not raw.preload:
-        raise RuntimeError('Modifying data of Raw is only supported '
-                           'when preloading is used. Use preload=True '
-                           '(or string) in the constructor.')
-    events_sel = (events[:, 2] == event_id)
-    event_start = events[events_sel, 0]
-    s_start = int(np.ceil(raw.info['sfreq'] * tmin))
-    s_end = int(np.ceil(raw.info['sfreq'] * tmax))
-
-    picks = pick_types(raw.info, meg=True, eeg=True, eog=True, ecg=True,
-                       emg=True, ref_meg=True, misc=True, chpi=True,
-                       exclude='bads', stim=False, resp=False)
+    if mode not in ('linear', 'window'):
+        raise ValueError("mode has to be 'linear' or 'window' (got %s)" % mode)
 
+    if copy:
+        inst = inst.copy()
+    s_start = int(np.ceil(inst.info['sfreq'] * tmin))
+    s_end = int(np.ceil(inst.info['sfreq'] * tmax))
+    if (mode == "window") and (s_end - s_start) < 4:
+        raise ValueError('Time range is too short. Use a larger interval '
+                         'or set mode to "linear".')
+    window = None
     if mode == 'window':
-        window = 1 - np.r_[signal.hann(4)[:2],
-                           np.ones(np.abs(s_end - s_start) - 4),
-                           signal.hann(4)[-2:]].T
-
-    for k in range(len(event_start)):
-        first_samp = int(event_start[k]) - raw.first_samp + s_start
-        last_samp = int(event_start[k]) - raw.first_samp + s_end
-        data, _ = raw[picks, first_samp:last_samp]
-        if mode == 'linear':
-            x = np.array([first_samp, last_samp])
-            f = interpolate.interp1d(x, data[:, (0, -1)])
-            xnew = np.arange(first_samp, last_samp)
-            interp_data = f(xnew)
-            raw[picks, first_samp:last_samp] = interp_data
-        elif mode == 'window':
-            raw[picks, first_samp:last_samp] = data * window[np.newaxis, :]
-    return raw
+        window = _get_window(s_start, s_end)
+    ch_names = inst.info['ch_names']
+    picks = pick_channels(ch_names, ch_names)
+
+    if isinstance(inst, Raw):
+        _check_preload(inst)
+        if events is None:
+            events = find_events(inst, stim_channel=stim_channel)
+        if len(events) == 0:
+            raise ValueError('No events are found')
+        if event_id is None:
+            events_sel = np.arange(len(events))
+        else:
+            events_sel = (events[:, 2] == event_id)
+        event_start = events[events_sel, 0]
+        data = inst._data
+        for event_idx in event_start:
+            first_samp = int(event_idx) - inst.first_samp + s_start
+            last_samp = int(event_idx) - inst.first_samp + s_end
+            _fix_artifact(data, window, picks, first_samp, last_samp, mode)
+
+    elif isinstance(inst, Epochs):
+        _check_preload(inst)
+        if inst.reject is not None:
+            raise RuntimeError('Reject is already applied. Use reject=None '
+                               'in the constructor.')
+        e_start = int(np.ceil(inst.info['sfreq'] * inst.tmin))
+        first_samp = s_start - e_start
+        last_samp = s_end - e_start
+        data = inst._data
+        for epoch in data:
+            _fix_artifact(epoch, window, picks, first_samp, last_samp, mode)
+
+    elif isinstance(inst, Evoked):
+        first_samp = s_start - inst.first
+        last_samp = s_end - inst.first
+        data = inst.data
+        _fix_artifact(data, window, picks, first_samp, last_samp, mode)
+
+    else:
+        raise TypeError('Not a Raw or Epochs or Evoked (got %s).' % type(inst))
+
+    return inst
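A usage sketch for the refactored API (the file name and event id are placeholders; 'STI 014' is the conventional Neuromag stim channel, adjust as needed):

    from mne import find_events
    from mne.io import Raw
    from mne.preprocessing.stim import fix_stim_artifact

    raw = Raw('sample_raw.fif', preload=True)  # preloading is required for Raw
    events = find_events(raw, stim_channel='STI 014')
    raw = fix_stim_artifact(raw, events=events, event_id=1,
                            tmin=0., tmax=0.01, mode='linear')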
diff --git a/mne/preprocessing/tests/data/eeglab_extended_infomax_results_eeg_data.mat b/mne/preprocessing/tests/data/eeglab_extended_infomax_results_eeg_data.mat
new file mode 100644
index 0000000..68d17b6
Binary files /dev/null and b/mne/preprocessing/tests/data/eeglab_extended_infomax_results_eeg_data.mat differ
diff --git a/mne/preprocessing/tests/data/eeglab_extended_infomax_results_meg_data.mat b/mne/preprocessing/tests/data/eeglab_extended_infomax_results_meg_data.mat
new file mode 100644
index 0000000..e661f30
Binary files /dev/null and b/mne/preprocessing/tests/data/eeglab_extended_infomax_results_meg_data.mat differ
diff --git a/mne/preprocessing/tests/data/eeglab_infomax_results_eeg_data.mat b/mne/preprocessing/tests/data/eeglab_infomax_results_eeg_data.mat
new file mode 100644
index 0000000..1bc7b0e
Binary files /dev/null and b/mne/preprocessing/tests/data/eeglab_infomax_results_eeg_data.mat differ
diff --git a/mne/preprocessing/tests/data/eeglab_infomax_results_meg_data.mat b/mne/preprocessing/tests/data/eeglab_infomax_results_meg_data.mat
new file mode 100644
index 0000000..7b9ff0a
Binary files /dev/null and b/mne/preprocessing/tests/data/eeglab_infomax_results_meg_data.mat differ
diff --git a/mne/preprocessing/tests/test_eeglab_infomax.py b/mne/preprocessing/tests/test_eeglab_infomax.py
new file mode 100644
index 0000000..99ef5af
--- /dev/null
+++ b/mne/preprocessing/tests/test_eeglab_infomax.py
@@ -0,0 +1,204 @@
+import numpy as np
+
+from scipy.linalg import svd
+
+from mne.io import Raw
+from mne import pick_types
+
+import scipy.io as sio
+from scipy.linalg import pinv
+from mne.preprocessing.infomax_ import infomax
+from numpy.testing import assert_almost_equal
+from mne.utils import random_permutation
+from mne.datasets import testing
+import os.path as op
+
+base_dir = op.join(op.dirname(__file__), 'data')
+
+
+def generate_data_for_comparing_against_eeglab_infomax(ch_type, random_state):
+
+    data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
+    raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
+
+    raw = Raw(raw_fname, preload=True)
+
+    if ch_type == 'eeg':
+        picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    else:
+        picks = pick_types(raw.info, meg=ch_type,
+                           eeg=False, exclude='bads')
+
+    # select a small number of channels for the test
+    number_of_channels_to_use = 5
+    idx_perm = random_permutation(picks.shape[0], random_state)
+    picks = picks[idx_perm[:number_of_channels_to_use]]
+
+    raw.filter(1, 45, n_jobs=2)
+    X = raw[picks, :][0][:, ::20]
+
+    # Subtract the mean
+    mean_X = X.mean(axis=1)
+    X -= mean_X[:, None]
+
+    # pre_whitening: z-score
+    X /= np.std(X)
+
+    T = X.shape[1]
+    cov_X = np.dot(X, X.T) / T
+
+    # Let's whiten the data
+    U, D, _ = svd(cov_X)
+    W = np.dot(U, U.T / np.sqrt(D)[:, None])
+    Y = np.dot(W, X)
+
+    return Y
+
+
+@testing.requires_testing_data
+def test_mne_python_vs_eeglab():
+    """ Test eeglab vs mne_python infomax code.
+    """
+    random_state = 42
+
+    methods = ['infomax', 'infomax', 'extended_infomax', 'extended_infomax']
+    list_ch_types = ['eeg', 'mag', 'eeg', 'mag']
+
+    for method, ch_type in zip(methods, list_ch_types):
+
+        if method == 'infomax':
+            if ch_type == 'eeg':
+                eeglab_results_file = 'eeglab_infomax_results_eeg_data.mat'
+            elif ch_type == 'mag':
+                eeglab_results_file = 'eeglab_infomax_results_meg_data.mat'
+
+        elif method == 'extended_infomax':
+
+            if ch_type == 'eeg':
+                eeglab_results_file = ('eeglab_extended_infomax_results_eeg_'
+                                       'data.mat')
+            elif ch_type == 'mag':
+                eeglab_results_file = ('eeglab_extended_infomax_results_meg_'
+                                       'data.mat')
+
+        Y = generate_data_for_comparing_against_eeglab_infomax(ch_type,
+                                                               random_state)
+        N = Y.shape[0]
+        T = Y.shape[1]
+
+        # For comparison against eeglab, make sure the following
+        # parameters have the same value in mne_python and eeglab:
+        #
+        # - starting point
+        # - random state
+        # - learning rate
+        # - block size
+        # - blowup parameter
+        # - blowup_fac parameter
+        # - tolerance for stopping the algorithm
+        # - number of iterations
+        # - anneal_step parameter
+        #
+        # Notes:
+        # * By default, eeglab whitens the data using the "sphering transform"
+        #   instead of pca. The mne_python infomax code does not
+        #   whiten the data. To make sure both mne_python and eeglab start
+        #   from the same point (i.e., the same matrix), we need to
+        #   whiten the data beforehand and pass the whitened data to both
+        #   mne_python and eeglab. Finally, we need to tell eeglab that
+        #   the input data are already whitened, which can be done by calling
+        #   eeglab with the following syntax:
+        #
+        #   % Run infomax
+        #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
+        #       runica( Y, 'sphering', 'none');
+        #
+        #   % Run extended infomax
+        #   [unmixing,sphere,meanvar,bias,signs,lrates,sources,y]  = ...
+        #       runica( Y, 'sphering', 'none', 'extended', 1);
+        #
+        #   By calling eeglab using the former code, we are using its default
+        #   parameters, which are specified below in the section
+        #   "EEGLAB default parameters".
+        #
+        # * eeglab does not expose a parameter for fixing the random state.
+        #   Therefore, to accomplish this, we need to edit the runica.m
+        #   file located at /path_to_eeglab/functions/sigprocfunc/runica.m
+        #
+        #   i) Comment the line related to the random number generator
+        #      (line 812).
+        #   ii) Then, add the following line just below line 812:
+        #       rng(42); %use 42 as random seed.
+        #
+        # * eeglab does not have the parameter "n_small_angle",
+        #   so we need to disable it to make a fair comparison.
+        #
+        # * Finally, we need to take the unmixing matrix estimated by the
+        #   mne_python infomax implementation and order the components
+        #   in the same way that eeglab does. This is done below in the section
+        #   "Order the components in the same way that eeglab does".
+
+        ###############################################################
+        # EEGLAB default parameters
+        ###############################################################
+        l_rate_eeglab = 0.00065 / np.log(N)
+        block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))
+        blowup_eeglab = 1e9
+        blowup_fac_eeglab = 0.8
+        max_iter_eeglab = 512
+
+        if method == 'infomax':
+            anneal_step_eeglab = 0.9
+            use_extended = False
+
+        elif method == 'extended_infomax':
+            anneal_step_eeglab = 0.98
+            use_extended = True
+
+        if N > 32:
+            w_change_eeglab = 1e-7
+        else:
+            w_change_eeglab = 1e-6
+        ###############################################################
+
+        # Call the mne_python infomax version using the following syntax
+        # to obtain the same result as the eeglab version
+        unmixing = infomax(Y.T, extended=use_extended,
+                           random_state=random_state,
+                           max_iter=max_iter_eeglab,
+                           l_rate=l_rate_eeglab,
+                           block=block_eeglab,
+                           w_change=w_change_eeglab,
+                           blowup=blowup_eeglab,
+                           blowup_fac=blowup_fac_eeglab,
+                           n_small_angle=None,
+                           anneal_step=anneal_step_eeglab
+                           )
+
+        #######################################################################
+        # Order the components in the same way that eeglab does
+        #######################################################################
+
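+        # eeglab orders components by the mean projected variance of each
+        # source (mvar below), in descending order; reproducing that
+        # criterion here makes the two unmixing matrices comparable
+        # row-for-row.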
+        sources = np.dot(unmixing, Y)
+        mixing = pinv(unmixing)
+
+        mvar = np.sum(mixing ** 2, axis=0) * \
+            np.sum(sources ** 2, axis=1) / (N * T - 1)
+        windex = np.argsort(mvar)[::-1]
+
+        unmixing_ordered = unmixing[windex, :]
+        #######################################################################
+
+        #######################################################################
+        # Load the eeglab results, then compare the unmixing matrices
+        # estimated by mne_python and eeglab. The comparison uses the
+        # \ell_inf norm:
+        # ||unmixing_mne_python - unmixing_eeglab||_inf
+        #######################################################################
+
+        eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))
+        unmixing_eeglab = eeglab_data['unmixing_eeglab']
+
+        maximum_difference = np.max(np.abs(unmixing_ordered - unmixing_eeglab))
+
+        assert_almost_equal(maximum_difference, 1e-12, decimal=10)
diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py
index c969b01..c5862ce 100644
--- a/mne/preprocessing/tests/test_ica.py
+++ b/mne/preprocessing/tests/test_ica.py
@@ -7,11 +7,9 @@ from __future__ import print_function
 
 import os
 import os.path as op
-from functools import wraps
 import warnings
 
 from nose.tools import assert_true, assert_raises, assert_equal
-from copy import deepcopy
 import numpy as np
 from numpy.testing import (assert_array_almost_equal, assert_array_equal,
                            assert_allclose)
@@ -22,13 +20,17 @@ from mne import io, Epochs, read_events, pick_types
 from mne.cov import read_cov
 from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
                                read_ica, run_ica)
-from mne.preprocessing.ica import score_funcs, _check_n_pca_components
+from mne.preprocessing.ica import get_score_funcs, corrmap
 from mne.io.meas_info import Info
-from mne.utils import set_log_file, check_sklearn_version, _TempDir
+from mne.utils import (set_log_file, _TempDir, requires_sklearn, slow_test,
+                       run_tests_if_main)
 
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+import matplotlib.pyplot as plt  # noqa
 
-tempdir = _TempDir()
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_dir, 'test_raw.fif')
@@ -37,9 +39,8 @@ evoked_nf_name = op.join(data_dir, 'test-nf-ave.fif')
 test_cov_name = op.join(data_dir, 'test-cov.fif')
 
 event_id, tmin, tmax = 1, -0.2, 0.2
-start, stop = 0, 6  # if stop is too small pca may fail in some cases, but
-                    # we're okay on this file
-
+# if stop is too small pca may fail in some cases, but we're okay on this file
+start, stop = 0, 6
 score_funcs_unsuited = ['pointbiserialr', 'ansari']
 try:
     from sklearn.utils.validation import NonBLASDotWarning
@@ -48,24 +49,12 @@ except:
     pass
 
 
-def requires_sklearn(function):
-    """Decorator to skip test if scikit-learn >= 0.12 is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        if not check_sklearn_version(min_version='0.12'):
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires scikit-learn >= 0.12'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-        return ret
-    return dec
-
-
 @requires_sklearn
 def test_ica_full_data_recovery():
     """Test recovery of full data when no source is rejected"""
     # Most basic recovery
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
     events = read_events(event_name)
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=False, exclude='bads')[:10]
@@ -122,7 +111,8 @@ def test_ica_full_data_recovery():
 def test_ica_rank_reduction():
     """Test recovery of full data when no source is rejected"""
     # Most basic recovery
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(0.5)
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=False, exclude='bads')[:10]
     n_components = 5
@@ -147,9 +137,38 @@ def test_ica_rank_reduction():
 
 
 @requires_sklearn
+def test_ica_reset():
+    """Test ICA resetting"""
+    raw = io.Raw(raw_fname).crop(0.5, stop, False)
+    raw.load_data()
+    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
+                       eog=False, exclude='bads')[:10]
+
+    run_time_attrs = (
+        '_pre_whitener',
+        'unmixing_matrix_',
+        'mixing_matrix_',
+        'n_components_',
+        'n_samples_',
+        'pca_components_',
+        'pca_explained_variance_',
+        'pca_mean_'
+    )
+    with warnings.catch_warnings(record=True):
+        ica = ICA(
+            n_components=3, max_pca_components=3, n_pca_components=3,
+            method='fastica', max_iter=1).fit(raw, picks=picks)
+
+    assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
+    ica._reset()
+    assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
+
+
+@requires_sklearn
 def test_ica_core():
     """Test ICA on raw and epochs"""
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=False, exclude='bads')
     # XXX. The None cases helped revealing bugs but are time consuming.
@@ -174,10 +193,11 @@ def test_ica_core():
 
     # test essential core functionality
     for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
-      # Test ICA raw
+        # Test ICA raw
         ica = ICA(noise_cov=n_cov, n_components=n_comp,
                   max_pca_components=max_n, n_pca_components=max_n,
                   random_state=0, method=method, max_iter=1)
+        assert_raises(ValueError, ica.__contains__, 'mag')
 
         print(ica)  # to test repr
 
@@ -189,6 +209,7 @@ def test_ica_core():
         with warnings.catch_warnings(record=True):
             ica.fit(raw, picks=pcks, start=start, stop=stop)
             repr(ica)  # to test repr
+        assert_true('mag' in ica)  # should now work without error
 
         # test re-fit
         unmixing1 = ica.unmixing_matrix_
@@ -245,11 +266,14 @@ def test_ica_core():
     assert_raises(ValueError, ica.apply, offender)
 
 
+@slow_test
 @requires_sklearn
 def test_ica_additional():
     """Test additional ICA functionality"""
+    tempdir = _TempDir()
     stop2 = 500
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=False, exclude='bads')
     test_cov = read_cov(test_cov_name)
@@ -258,13 +282,19 @@ def test_ica_additional():
                        eog=False, exclude='bads')
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True)
+    # test if n_components=None works
+    with warnings.catch_warnings(record=True):
+        ica = ICA(n_components=None,
+                  max_pca_components=None,
+                  n_pca_components=None, random_state=0)
+        ica.fit(epochs, picks=picks, decim=3)
     # for testing eog functionality
     picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                         eog=True, exclude='bads')
     epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                         baseline=(None, 0), preload=True)
 
-    test_cov2 = deepcopy(test_cov)
+    test_cov2 = test_cov.copy()
     ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
               n_pca_components=4)
     assert_true(ica.info is None)
@@ -277,7 +307,16 @@ def test_ica_additional():
               n_pca_components=4)
     assert_raises(RuntimeError, ica.save, '')
     with warnings.catch_warnings(record=True):
-        ica.fit(raw, picks=None, start=start, stop=stop2)
+        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)
+
+    # test corrmap
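+    # corrmap matches component topographies across ICA solutions by
+    # correlating them with a template map and stores the matching
+    # component indices under the given label in each ica.labels_.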
+    ica2 = ica.copy()
+    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
+            ch_type="mag")
+    corrmap([ica, ica2], (0, 0), threshold=2, plot=False)
+    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
+    assert_true(0 in ica.labels_["blinks"])
+    plt.close('all')
 
     # test warnings on bad filenames
     with warnings.catch_warnings(record=True) as w:
@@ -356,7 +395,10 @@ def test_ica_additional():
         # check type consistency
         attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                  'pca_explained_variance_ _pre_whitener')
-        f = lambda x, y: getattr(x, y).dtype
+
+        def f(x, y):
+            return getattr(x, y).dtype
+
         for attr in attrs.split():
             assert_equal(f(ica_read, attr), f(ica, attr))
 
@@ -385,7 +427,7 @@ def test_ica_additional():
 
     os.remove(test_ica_fname)
     # check score funcs
-    for name, func in score_funcs.items():
+    for name, func in get_score_funcs().items():
         if name in score_funcs_unsuited:
             continue
         scores = ica.score_sources(raw, target='EOG 061', score_func=func,
@@ -425,7 +467,7 @@ def test_ica_additional():
         assert_equal(len(scores[0]), ica.n_components_)
 
     # check score funcs
-    for name, func in score_funcs.items():
+    for name, func in get_score_funcs().items():
         if name in score_funcs_unsuited:
             continue
         scores = ica.score_sources(epochs_eog, target='EOG 061',
@@ -478,14 +520,14 @@ def test_ica_additional():
     ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
     assert_true(ica.n_components_ == len(ica_chans))
     assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
-    assert_true(ica_epochs.raw is None)
+    assert_true(ica_epochs._raw is None)
     assert_true(ica_epochs.preload is True)
 
     # test float n pca components
     ica.pca_explained_variance_ = np.array([0.2] * 5)
     ica.n_components_ = 0
     for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
-        ncomps_ = _check_n_pca_components(ica, ncomps)
+        ncomps_ = ica._check_n_pca_components(ncomps)
         assert_true(ncomps_ == expected)
 
 
@@ -507,7 +549,9 @@ def test_run_ica():
 @requires_sklearn
 def test_ica_reject_buffer():
     """Test ICA data raw buffer rejection"""
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    tempdir = _TempDir()
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
     picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=False, exclude='bads')
     ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
@@ -526,7 +570,8 @@ def test_ica_reject_buffer():
 @requires_sklearn
 def test_ica_twice():
     """Test running ICA twice"""
-    raw = io.Raw(raw_fname, preload=True).crop(0, stop, False).crop(1.5)
+    raw = io.Raw(raw_fname).crop(1.5, stop, False)
+    raw.load_data()
     picks = pick_types(raw.info, meg='grad', exclude='bads')
     n_components = 0.9
     max_pca_components = None
@@ -543,3 +588,5 @@ def test_ica_twice():
                    n_pca_components=1.0, random_state=0)
         ica2.fit(raw_new, picks=picks, decim=3)
         assert_equal(ica1.n_components_, ica2.n_components_)
+
+run_tests_if_main()
diff --git a/mne/preprocessing/tests/test_infomax.py b/mne/preprocessing/tests/test_infomax.py
index cdfdc9f..d8d9a72 100644
--- a/mne/preprocessing/tests/test_infomax.py
+++ b/mne/preprocessing/tests/test_infomax.py
@@ -14,7 +14,7 @@ from scipy import stats
 from scipy import linalg
 
 from mne.preprocessing.infomax_ import infomax
-from mne.utils import requires_sklearn
+from mne.utils import requires_sklearn, run_tests_if_main
 
 
 def center_and_norm(x, axis=-1):
@@ -34,14 +34,13 @@ def center_and_norm(x, axis=-1):
 
 
 @requires_sklearn
-def test_infomax_simple(add_noise=False):
-    """ Test the infomax algorithm on very simple data.
+def test_infomax_blowup():
+    """ Test the infomax algorithm blowup condition
     """
     from sklearn.decomposition import RandomizedPCA
-    rng = np.random.RandomState(0)
     # scipy.stats uses the global RNG:
     np.random.seed(0)
-    n_samples = 1000
+    n_samples = 100
     # Generate two sources:
     s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
     s2 = stats.t.rvs(1, size=n_samples)
@@ -55,44 +54,85 @@ def test_infomax_simple(add_noise=False):
                        [np.sin(phi), -np.cos(phi)]])
     m = np.dot(mixing, s)
 
-    if add_noise:
-        m += 0.1 * rng.randn(2, 1000)
-
     center_and_norm(m)
 
-    algos = [True, False]
-    for algo in algos:
-        X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
-        k_ = infomax(X, extended=algo)
-        s_ = np.dot(k_, X.T)
+    X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
+    k_ = infomax(X, extended=True, l_rate=0.1)
+    s_ = np.dot(k_, X.T)
 
-        center_and_norm(s_)
-        s1_, s2_ = s_
-        # Check to see if the sources have been estimated
-        # in the wrong order
-        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
-            s2_, s1_ = s_
-        s1_ *= np.sign(np.dot(s1_, s1))
-        s2_ *= np.sign(np.dot(s2_, s2))
+    center_and_norm(s_)
+    s1_, s2_ = s_
+    # Check to see if the sources have been estimated
+    # in the wrong order
+    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+        s2_, s1_ = s_
+    s1_ *= np.sign(np.dot(s1_, s1))
+    s2_ *= np.sign(np.dot(s2_, s2))
 
-        # Check that we have estimated the original sources
-        if not add_noise:
-            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
-            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
-        else:
-            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
-            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
+    # Check that we have estimated the original sources
+    assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+    assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
 
 
 @requires_sklearn
-def test_non_square_infomax(add_noise=False):
+def test_infomax_simple():
     """ Test the infomax algorithm on very simple data.
     """
     from sklearn.decomposition import RandomizedPCA
+    rng = np.random.RandomState(0)
+    # scipy.stats uses the global RNG:
+    np.random.seed(0)
+    n_samples = 500
+    # Generate two sources:
+    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
+    s2 = stats.t.rvs(1, size=n_samples)
+    s = np.c_[s1, s2].T
+    center_and_norm(s)
+    s1, s2 = s
+
+    # Mixing angle
+    phi = 0.6
+    mixing = np.array([[np.cos(phi),  np.sin(phi)],
+                       [np.sin(phi), -np.cos(phi)]])
+    for add_noise in (False, True):
+        m = np.dot(mixing, s)
+        if add_noise:
+            m += 0.1 * rng.randn(2, n_samples)
+        center_and_norm(m)
+
+        algos = [True, False]
+        for algo in algos:
+            X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
+            k_ = infomax(X, extended=algo)
+            s_ = np.dot(k_, X.T)
+
+            center_and_norm(s_)
+            s1_, s2_ = s_
+            # Check to see if the sources have been estimated
+            # in the wrong order
+            if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+                s2_, s1_ = s_
+            s1_ *= np.sign(np.dot(s1_, s1))
+            s2_ *= np.sign(np.dot(s2_, s2))
+
+            # Check that we have estimated the original sources
+            if not add_noise:
+                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+            else:
+                assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
+                assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
+
+
+@requires_sklearn
+def test_non_square_infomax():
+    """ Test non-square infomax
+    """
+    from sklearn.decomposition import RandomizedPCA
 
     rng = np.random.RandomState(0)
 
-    n_samples = 1000
+    n_samples = 200
     # Generate two sources:
     t = np.linspace(0, 100, n_samples)
     s1 = np.sin(t)
@@ -104,33 +144,36 @@ def test_non_square_infomax(add_noise=False):
     # Mixing matrix
     n_observed = 6
     mixing = rng.randn(n_observed, 2)
-    m = np.dot(mixing, s)
+    for add_noise in (False, True):
+        m = np.dot(mixing, s)
 
-    if add_noise:
-        m += 0.1 * rng.randn(n_observed, n_samples)
+        if add_noise:
+            m += 0.1 * rng.randn(n_observed, n_samples)
 
-    center_and_norm(m)
-    pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
-    m = m.T
-    m = pca.fit_transform(m)
-    # we need extended since input signals are sub-gaussian
-    unmixing_ = infomax(m, random_state=rng, extended=True)
-    s_ = np.dot(unmixing_, m.T)
-    # Check that the mixing model described in the docstring holds:
-    mixing_ = linalg.pinv(unmixing_.T)
+        center_and_norm(m)
+        pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
+        m = m.T
+        m = pca.fit_transform(m)
+        # we need extended since input signals are sub-gaussian
+        unmixing_ = infomax(m, random_state=rng, extended=True)
+        s_ = np.dot(unmixing_, m.T)
+        # Check that the mixing model described in the docstring holds:
+        mixing_ = linalg.pinv(unmixing_.T)
 
-    assert_almost_equal(m, s_.T.dot(mixing_))
+        assert_almost_equal(m, s_.T.dot(mixing_))
 
-    center_and_norm(s_)
-    s1_, s2_ = s_
-    # Check to see if the sources have been estimated
-    # in the wrong order
-    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
-        s2_, s1_ = s_
-    s1_ *= np.sign(np.dot(s1_, s1))
-    s2_ *= np.sign(np.dot(s2_, s2))
+        center_and_norm(s_)
+        s1_, s2_ = s_
+        # Check to see if the sources have been estimated
+        # in the wrong order
+        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
+            s2_, s1_ = s_
+        s1_ *= np.sign(np.dot(s1_, s1))
+        s2_ *= np.sign(np.dot(s2_, s2))
 
-    # Check that we have estimated the original sources
-    if not add_noise:
-        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
-        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+        # Check that we have estimated the original sources
+        if not add_noise:
+            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
+            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
+
+run_tests_if_main()
diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py
new file mode 100644
index 0000000..f2320dc
--- /dev/null
+++ b/mne/preprocessing/tests/test_maxwell.py
@@ -0,0 +1,256 @@
+# Author: Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose,
+                           assert_array_almost_equal)
+from nose.tools import assert_true, assert_raises
+
+from mne import compute_raw_covariance, pick_types
+from mne.cov import _estimate_rank_meeg_cov
+from mne.datasets import testing
+from mne.forward._make_forward import _prep_meg_channels
+from mne.io import Raw, proc_history
+from mne.preprocessing.maxwell import (_maxwell_filter as maxwell_filter,
+                                       get_num_moments, _sss_basis)
+from mne.utils import _TempDir, run_tests_if_main, slow_test
+
+warnings.simplefilter('always')  # Always throw warnings
+
+data_path = op.join(testing.data_path(download=False))
+raw_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+sss_std_fname = op.join(data_path, 'SSS',
+                        'test_move_anon_raw_simp_stdOrigin_sss.fif')
+sss_nonstd_fname = op.join(data_path, 'SSS',
+                           'test_move_anon_raw_simp_nonStdOrigin_sss.fif')
+sss_bad_recon_fname = op.join(data_path, 'SSS',
+                              'test_move_anon_raw_bad_recon_sss.fif')
+
+
+@testing.requires_testing_data
+def test_maxwell_filter():
+    """Test multipolar moment and Maxwell filter"""
+
+    # TODO: Future tests integrate with mne/io/tests/test_proc_history
+
+    # Load testing data (raw, SSS std origin, SSS non-standard origin)
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
+    raw.load_data()
+    with warnings.catch_warnings(record=True):  # maxshield, naming
+        sss_std = Raw(sss_std_fname, allow_maxshield=True)
+        sss_nonStd = Raw(sss_nonstd_fname, allow_maxshield=True)
+        raw_err = Raw(raw_fname, proj=True,
+                      allow_maxshield=True).crop(0., 0.1, False)
+    assert_raises(RuntimeError, maxwell_filter, raw_err)
+
+    # Create coils
+    all_coils, _, _, meg_info = _prep_meg_channels(raw.info, ignore_ref=True,
+                                                   elekta_defs=True)
+    picks = [raw.info['ch_names'].index(ch) for ch in [coil['chname']
+                                                       for coil in all_coils]]
+    coils = [all_coils[ci] for ci in picks]
+    ncoils = len(coils)
+
+    int_order, ext_order = 8, 3
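+    # Each expansion order l contributes (2 * l + 1) moments, so summing
+    # over l = 1..L gives L ** 2 + 2 * L basis functions.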
+    n_int_bases = int_order ** 2 + 2 * int_order
+    n_ext_bases = ext_order ** 2 + 2 * ext_order
+    nbases = n_int_bases + n_ext_bases
+
+    # Check number of bases computed correctly
+    assert_equal(get_num_moments(int_order, ext_order), nbases)
+
+    # Check multipolar moment basis set
+    S_in, S_out = _sss_basis(origin=np.array([0, 0, 40]), coils=coils,
+                             int_order=int_order, ext_order=ext_order)
+    assert_equal(S_in.shape, (ncoils, n_int_bases), 'S_in has incorrect shape')
+    assert_equal(S_out.shape, (ncoils, n_ext_bases),
+                 'S_out has incorrect shape')
+
+    # Test sss computation at the standard head origin
+    raw_sss = maxwell_filter(raw, origin=[0., 0., 40.],
+                             int_order=int_order, ext_order=ext_order)
+
+    sss_std_data = sss_std[picks][0]
+    assert_array_almost_equal(raw_sss[picks][0], sss_std_data,
+                              decimal=11, err_msg='Maxwell filtered data at '
+                              'standard origin incorrect.')
+
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(sss_std_data * sss_std_data, axis=1))
+    error = raw_sss[picks][0] - sss_std_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) > 1000, 'SNR < 1000')
+
+    # Test sss computation at non-standard head origin
+    raw_sss = maxwell_filter(raw, origin=[0., 20., 20.],
+                             int_order=int_order, ext_order=ext_order)
+    sss_nonStd_data = sss_nonStd[picks][0]
+    assert_array_almost_equal(raw_sss[picks][0], sss_nonStd_data, decimal=11,
+                              err_msg='Maxwell filtered data at non-std '
+                              'origin incorrect.')
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(sss_nonStd_data * sss_nonStd_data, axis=1))
+    error = raw_sss[picks][0] - sss_nonStd_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) > 1000, 'SNR < 1000')
+
+    # Check against SSS functions from proc_history
+    sss_info = raw_sss.info['proc_history'][0]['max_info']
+    assert_equal(get_num_moments(int_order, 0),
+                 proc_history._get_sss_rank(sss_info))
+
+    # Degenerate cases
+    raw_bad = raw.copy()
+    raw_bad.info['comps'] = [0]
+    assert_raises(RuntimeError, maxwell_filter, raw_bad)
+
+
+@testing.requires_testing_data
+def test_maxwell_filter_additional():
+    """Test processing of Maxwell filtered data"""
+
+    # TODO: Future tests integrate with mne/io/tests/test_proc_history
+
+    # Load testing data (raw, SSS std origin, SSS non-standard origin)
+    data_path = op.join(testing.data_path(download=False))
+
+    file_name = 'test_move_anon'
+
+    raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
+
+    with warnings.catch_warnings(record=True):  # maxshield
+        # Use 2.0 seconds of data to get stable cov. estimate
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 2., False)
+
+    # Get MEG channels, compute Maxwell filtered data
+    raw.load_data()
+    raw.pick_types(meg=True, eeg=False)
+    int_order, ext_order = 8, 3
+    raw_sss = maxwell_filter(raw, int_order=int_order, ext_order=ext_order)
+
+    # Test io on processed data
+    tempdir = _TempDir()
+    test_outname = op.join(tempdir, 'test_raw_sss.fif')
+    raw_sss.save(test_outname)
+    raw_sss_loaded = Raw(test_outname, preload=True, proj=False,
+                         allow_maxshield=True)
+
+    # Some numerical imprecision since save uses 'single' fmt
+    assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
+                    rtol=1e-6, atol=1e-20)
+
+    # Test rank of covariance matrices for raw and SSS processed data
+    cov_raw = compute_raw_covariance(raw)
+    cov_sss = compute_raw_covariance(raw_sss)
+
+    scalings = None
+    cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
+    cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
+                                           scalings)
+
+    assert_equal(cov_raw_rank, raw.info['nchan'])
+    assert_equal(cov_sss_rank, get_num_moments(int_order, 0))
+
+
+@slow_test
+@testing.requires_testing_data
+def test_bads_reconstruction():
+    """Test reconstruction of channels marked as bad"""
+
+    with warnings.catch_warnings(record=True):  # maxshield, naming
+        sss_bench = Raw(sss_bad_recon_fname, allow_maxshield=True)
+
+    raw_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True).crop(0., 1., False)
+
+    # Set 30 random bad MEG channels (20 grad, 10 mag)
+    bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
+            'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
+            'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
+            'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
+            'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
+    raw.info['bads'] = bads
+
+    # Compute Maxwell filtered data
+    raw_sss = maxwell_filter(raw)
+    meg_chs = pick_types(raw_sss.info)
+    non_meg_chs = np.setdiff1d(np.arange(len(raw.ch_names)), meg_chs)
+    sss_bench_data = sss_bench[meg_chs][0]
+
+    # Some numerical imprecision since save uses 'single' fmt
+    assert_allclose(raw_sss[meg_chs][0], sss_bench_data,
+                    rtol=1e-12, atol=1e-4, err_msg='Maxwell filtered data '
+                    'with reconstructed bads is incorrect.')
+
+    # Confirm SNR is above 1000
+    bench_rms = np.sqrt(np.mean(raw_sss[meg_chs][0] ** 2, axis=1))
+    error = raw_sss[meg_chs][0] - sss_bench_data
+    error_rms = np.sqrt(np.mean(error ** 2, axis=1))
+    assert_true(np.mean(bench_rms / error_rms) >= 1000,
+                'SNR (%0.1f) < 1000' % np.mean(bench_rms / error_rms))
+    assert_allclose(raw_sss[non_meg_chs][0], raw[non_meg_chs][0])
+
+
+@testing.requires_testing_data
+def test_spatiotemporal_maxwell():
+    """Test spatiotemporal (tSSS) processing"""
+    # Load raw testing data
+    with warnings.catch_warnings(record=True):  # maxshield
+        raw = Raw(raw_fname, allow_maxshield=True)
+
+    # Create coils
+    picks = pick_types(raw.info)
+
+    # Test that window is less than length of data
+    assert_raises(ValueError, maxwell_filter, raw, st_dur=1000.)
+
+    # Check both 4 and 10 seconds because Elekta handles them differently
+    # This is to ensure that std/non-std tSSS windows are correctly handled
+    st_durs = [4., 10.]
+    for st_dur in st_durs:
+        # Load tSSS data depending on st_dur and get data
+        tSSS_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_' +
+                             'spatiotemporal_%0ds_sss.fif' % st_dur)
+
+        with warnings.catch_warnings(record=True):  # maxshield, naming
+            tsss_bench = Raw(tSSS_fname, allow_maxshield=True)
+            # Because Elekta's tSSS sometimes(!) lumps the tail window of data
+            # onto the previous buffer if it's shorter than st_dur, we have to
+            # crop the data here to compensate for Elekta's tSSS behavior.
+            if st_dur == 10.:
+                tsss_bench.crop(0, st_dur, copy=False)
+        tsss_bench_data = tsss_bench[picks, :][0]
+        del tsss_bench
+
+        # Test sss computation at the standard head origin. Same cropping issue
+        # as mentioned above.
+        if st_dur == 10.:
+            raw_tsss = maxwell_filter(raw.crop(0, st_dur), st_dur=st_dur)
+        else:
+            raw_tsss = maxwell_filter(raw, st_dur=st_dur)
+        assert_allclose(raw_tsss[picks][0], tsss_bench_data,
+                        rtol=1e-12, atol=1e-4, err_msg='Spatiotemporal (tSSS) '
+                        'maxwell filtered data at standard origin incorrect.')
+
+        # Confirm SNR is above 500. Single precision accounts for part of
+        # the discrepancy.
+        bench_rms = np.sqrt(np.mean(tsss_bench_data * tsss_bench_data, axis=1))
+        error = raw_tsss[picks][0] - tsss_bench_data
+        error_rms = np.sqrt(np.mean(error * error, axis=1))
+        assert_true(np.mean(bench_rms / error_rms) >= 500,
+                    'SNR (%0.1f) < 500' % np.mean(bench_rms / error_rms))
+
+    # Confirm we didn't modify other channels (like EEG chs)
+    non_picks = np.setdiff1d(np.arange(len(raw.ch_names)), picks)
+    assert_allclose(raw[non_picks, 0:raw_tsss.n_times][0],
+                    raw_tsss[non_picks, 0:raw_tsss.n_times][0])
+
+    # Degenerate cases
+    assert_raises(ValueError, maxwell_filter, raw, st_dur=10., st_corr=0.)
+
+run_tests_if_main()
diff --git a/mne/preprocessing/tests/test_ssp.py b/mne/preprocessing/tests/test_ssp.py
index ca6fae2..1d5cd0a 100644
--- a/mne/preprocessing/tests/test_ssp.py
+++ b/mne/preprocessing/tests/test_ssp.py
@@ -5,9 +5,10 @@ from nose.tools import assert_true, assert_equal
 from numpy.testing import assert_array_almost_equal
 import numpy as np
 
-from ...io import Raw
-from ...io.proj import make_projector, activate_proj
-from ..ssp import compute_proj_ecg, compute_proj_eog
+from mne.io import Raw
+from mne.io.proj import make_projector, activate_proj
+from mne.preprocessing.ssp import compute_proj_ecg, compute_proj_eog
+from mne.utils import run_tests_if_main
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -19,7 +20,8 @@ eog_times = np.array([0.5, 2.3, 3.6, 14.5])
 
 def test_compute_proj_ecg():
     """Test computation of ECG SSP projectors"""
-    raw = Raw(raw_fname, preload=True).crop(0, 10, False)
+    raw = Raw(raw_fname).crop(0, 10, False)
+    raw.load_data()
     for average in [False, True]:
         # For speed, let's not filter here (must also not reject then)
         projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
@@ -32,7 +34,7 @@ def test_compute_proj_ecg():
         # heart rate at least 0.5 Hz, but less than 3 Hz
         assert_true(events.shape[0] > 0.5 * dur_use and
                     events.shape[0] < 3 * dur_use)
-        #XXX: better tests
+        # XXX: better tests
 
         # without setting a bad channel, this should throw a warning
         with warnings.catch_warnings(record=True) as w:
@@ -48,7 +50,8 @@ def test_compute_proj_ecg():
 
 def test_compute_proj_eog():
     """Test computation of EOG SSP projectors"""
-    raw = Raw(raw_fname, preload=True).crop(0, 10, False)
+    raw = Raw(raw_fname).crop(0, 10, False)
+    raw.load_data()
     for average in [False, True]:
         n_projs_init = len(raw.info['projs'])
         projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
@@ -59,7 +62,7 @@ def test_compute_proj_eog():
         assert_true(len(projs) == (7 + n_projs_init))
         assert_true(np.abs(events.shape[0] -
                     np.sum(np.less(eog_times, dur_use))) <= 1)
-        #XXX: better tests
+        # XXX: better tests
 
         # This will throw a warning b/c simplefilter('always')
         with warnings.catch_warnings(record=True) as w:
@@ -75,7 +78,8 @@ def test_compute_proj_eog():
 
 def test_compute_proj_parallel():
     """Test computation of ExG projectors using parallelization"""
-    raw_0 = Raw(raw_fname, preload=True).crop(0, 10, False)
+    raw_0 = Raw(raw_fname).crop(0, 10, False)
+    raw_0.load_data()
     raw = raw_0.copy()
     projs, _ = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                 bads=['MEG 2443'], average=False,
@@ -95,3 +99,5 @@ def test_compute_proj_parallel():
     projs_2, _, _ = make_projector(projs_2, raw_2.info['ch_names'],
                                    bads=['MEG 2443'])
     assert_array_almost_equal(projs, projs_2, 10)
+
+run_tests_if_main()
diff --git a/mne/preprocessing/tests/test_stim.py b/mne/preprocessing/tests/test_stim.py
index 13af01c..eb290c4 100644
--- a/mne/preprocessing/tests/test_stim.py
+++ b/mne/preprocessing/tests/test_stim.py
@@ -6,71 +6,91 @@ import os.path as op
 
 import numpy as np
 from numpy.testing import assert_array_almost_equal
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_raises
 
 from mne.io import Raw
+from mne.io.pick import pick_types
 from mne.event import read_events
-from mne.preprocessing.stim import eliminate_stim_artifact
+from mne.epochs import Epochs
+from mne.preprocessing.stim import fix_stim_artifact
 
 data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(data_path, 'test_raw.fif')
 event_fname = op.join(data_path, 'test-eve.fif')
 
 
-def test_stim_elim():
-    """Test eliminate stim artifact"""
-    raw = Raw(raw_fname, preload=True)
+def test_fix_stim_artifact():
+    """Test fix stim artifact"""
     events = read_events(event_fname)
-    event_idx = np.where(events[:, 2] == 1)[0][0]
-    tidx = int(events[event_idx, 0] - raw.first_samp)
 
-    # use window around stimulus
-    tmin = -0.02
-    tmax = 0.02
-    test_tminidx = int(-0.01 * raw.info['sfreq'])
-    test_tmaxidx = int(0.01 * raw.info['sfreq'])
+    raw = Raw(raw_fname, preload=False)
+    assert_raises(RuntimeError, fix_stim_artifact, raw)
 
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='linear')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
-    diff_data0 = np.diff(data[0])
+    raw = Raw(raw_fname, preload=True)
+
+    # use window before stimulus in epochs
+    tmin, tmax, event_id = -0.2, 0.5, 1
+    picks = pick_types(raw.info, meg=True, eeg=True,
+                       eog=True, stim=False, exclude='bads')
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=None)
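+    # e_start is the (negative) sample offset of the first epoch sample
+    # relative to the event onset; subtracting it below converts
+    # event-relative sample offsets into indices into the epoch data.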
+    e_start = int(np.ceil(epochs.info['sfreq'] * epochs.tmin))
+    tmin, tmax = -0.045, -0.015
+    tmin_samp = int(-0.035 * epochs.info['sfreq']) - e_start
+    tmax_samp = int(-0.015 * epochs.info['sfreq']) - e_start
+
+    epochs = fix_stim_artifact(epochs, tmin=tmin, tmax=tmax, mode='linear')
+    data = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    diff_data0 = np.diff(data[0][0])
     diff_data0 -= np.mean(diff_data0)
     assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='window')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
-    assert_true(np.all(data) == 0.)
 
-    # use window before stimulus
-    tmin = -0.045
-    tmax = 0.015
-    test_tminidx = int(-0.035 * raw.info['sfreq'])
-    test_tmaxidx = int(-0.015 * raw.info['sfreq'])
+    epochs = fix_stim_artifact(epochs, tmin=tmin, tmax=tmax, mode='window')
+    data_from_epochs_fix = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    assert_true(np.all(data_from_epochs_fix) == 0.)
 
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='linear')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
+    # use window before stimulus in raw
+    event_idx = np.where(events[:, 2] == 1)[0][0]
+    tmin, tmax = -0.045, -0.015
+    tmin_samp = int(-0.035 * raw.info['sfreq'])
+    tmax_samp = int(-0.015 * raw.info['sfreq'])
+    tidx = int(events[event_idx, 0] - raw.first_samp)
+
+    assert_raises(ValueError, fix_stim_artifact, raw, events=np.array([]))
+    raw = fix_stim_artifact(raw, events=None, event_id=1, tmin=tmin,
+                            tmax=tmax, mode='linear', stim_channel='STI 014')
+    data, times = raw[:, (tidx + tmin_samp):(tidx + tmax_samp)]
     diff_data0 = np.diff(data[0])
     diff_data0 -= np.mean(diff_data0)
     assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='window')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
+
+    raw = fix_stim_artifact(raw, events, event_id=1, tmin=tmin,
+                            tmax=tmax, mode='window')
+    data, times = raw[:, (tidx + tmin_samp):(tidx + tmax_samp)]
     assert_true(np.all(data) == 0.)
 
+    # get epochs from raw with fixed data
+    tmin, tmax, event_id = -0.2, 0.5, 1
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, reject=None, baseline=None)
+    e_start = int(np.ceil(epochs.info['sfreq'] * epochs.tmin))
+    tmin_samp = int(-0.035 * epochs.info['sfreq']) - e_start
+    tmax_samp = int(-0.015 * epochs.info['sfreq']) - e_start
+    data_from_raw_fix = epochs.get_data()[:, :, tmin_samp:tmax_samp]
+    assert_true(np.all(data_from_raw_fix) == 0.)
+
     # use window after stimulus
-    tmin = 0.005
-    tmax = 0.045
-    test_tminidx = int(0.015 * raw.info['sfreq'])
-    test_tmaxidx = int(0.035 * raw.info['sfreq'])
-
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='linear')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
+    evoked = epochs.average()
+    tmin, tmax = 0.005, 0.045
+    tmin_samp = int(0.015 * evoked.info['sfreq']) - evoked.first
+    tmax_samp = int(0.035 * evoked.info['sfreq']) - evoked.first
+
+    evoked = fix_stim_artifact(evoked, tmin=tmin, tmax=tmax, mode='linear')
+    data = evoked.data[:, tmin_samp:tmax_samp]
     diff_data0 = np.diff(data[0])
     diff_data0 -= np.mean(diff_data0)
     assert_array_almost_equal(diff_data0, np.zeros(len(diff_data0)))
-    raw = eliminate_stim_artifact(raw, events, event_id=1, tmin=tmin,
-                                  tmax=tmax, mode='window')
-    data, times = raw[:, (tidx + test_tminidx):(tidx + test_tmaxidx)]
+
+    evoked = fix_stim_artifact(evoked, tmin=tmin, tmax=tmax, mode='window')
+    data = evoked.data[:, tmin_samp:tmax_samp]
     assert_true(np.all(data) == 0.)
diff --git a/mne/preprocessing/tests/test_xdawn.py b/mne/preprocessing/tests/test_xdawn.py
new file mode 100644
index 0000000..453ead0
--- /dev/null
+++ b/mne/preprocessing/tests/test_xdawn.py
@@ -0,0 +1,145 @@
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import os.path as op
+from nose.tools import (assert_equal, assert_raises)
+from numpy.testing import assert_array_equal
+from mne import (io, Epochs, read_events, pick_types,
+                 compute_raw_covariance)
+from mne.utils import requires_sklearn, run_tests_if_main
+from mne.preprocessing.xdawn import Xdawn
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+event_name = op.join(base_dir, 'test-eve.fif')
+evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
+
+tmin, tmax = -0.1, 0.2
+event_id = dict(cond2=2, cond3=3)
+
+
+def _get_data():
+    raw = io.Raw(raw_fname, add_eeg_ref=False, verbose=False, preload=True)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=False, eeg=True, stim=False,
+                       ecg=False, eog=False,
+                       exclude='bads')[::8]
+    return raw, events, picks
+
+
+def test_xdawn_init():
+    """Test init of xdawn."""
+    # init xdawn with good parameters
+    Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None)
+    # init xdawn with bad parameters
+    assert_raises(ValueError, Xdawn, correct_overlap=42)
+
+
+def test_xdawn_fit():
+    """Test Xdawn fit."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+    # =========== Basic Fit test =================
+    # test base xdawn
+    xd = Xdawn(n_components=2, correct_overlap='auto',
+               signal_cov=None, reg=None)
+    xd.fit(epochs)
+    # with these parameters, the overlap correction must be False
+    assert_equal(xd.correct_overlap, False)
+    # no overlap correction should give the averaged evoked
+    evoked = epochs['cond2'].average()
+    assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
+
+    # ========== with signal cov provided ====================
+    # provide covariance object
+    signal_cov = compute_raw_covariance(raw, picks=picks)
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    xd.fit(epochs)
+    # provide ndarray
+    signal_cov = np.eye(len(picks))
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    xd.fit(epochs)
+    # provide ndarray of bad shape
+    signal_cov = np.eye(len(picks) - 1)
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    assert_raises(ValueError, xd.fit, epochs)
+    # provide another type
+    signal_cov = 42
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=signal_cov, reg=None)
+    assert_raises(ValueError, xd.fit, epochs)
+    # fitting with baseline correction and overlap correction enabled
+    # should throw an error
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=(None, 0), verbose=False)
+
+    xd = Xdawn(n_components=2, correct_overlap=True)
+    assert_raises(ValueError, xd.fit, epochs)
+
+
+def test_xdawn_apply_transform():
+    """Test Xdawn apply and transform."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+    n_components = 2
+    # Fit Xdawn
+    xd = Xdawn(n_components=n_components, correct_overlap='auto')
+    xd.fit(epochs)
+
+    # apply on raw
+    xd.apply(raw)
+    # apply on epochs
+    xd.apply(epochs)
+    # apply on evoked
+    xd.apply(epochs.average())
+    # apply on other thing should raise an error
+    assert_raises(ValueError, xd.apply, 42)
+
+    # transform on epochs
+    xd.transform(epochs)
+    # transform on ndarray
+    xd.transform(epochs._data)
+    # transform on something else
+    assert_raises(ValueError, xd.transform, 42)
+
+
+@requires_sklearn
+def test_xdawn_regularization():
+    """Test Xdawn with regularization."""
+    # get data
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=True, baseline=None, verbose=False)
+
+    # test xdawn with overlap correction
+    xd = Xdawn(n_components=2, correct_overlap=True,
+               signal_cov=None, reg=0.1)
+    xd.fit(epochs)
+    # ========== with cov regularization ====================
+    # ledoit-wolf
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg='ledoit_wolf')
+    xd.fit(epochs)
+    # oas
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg='oas')
+    xd.fit(epochs)
+    # with shrinkage
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg=0.1)
+    xd.fit(epochs)
+    # with bad shrinkage
+    xd = Xdawn(n_components=2, correct_overlap=False,
+               signal_cov=np.eye(len(picks)), reg=2)
+    assert_raises(ValueError, xd.fit, epochs)
+
+run_tests_if_main()
diff --git a/mne/preprocessing/xdawn.py b/mne/preprocessing/xdawn.py
new file mode 100644
index 0000000..a113e45
--- /dev/null
+++ b/mne/preprocessing/xdawn.py
@@ -0,0 +1,484 @@
+"""Xdawn implementation."""
+# Authors: Alexandre Barachant <alexandre.barachant at gmail.com>
+#
+# License: BSD (3-clause)
+
+import copy as cp
+
+import numpy as np
+from scipy import linalg
+
+from ..io.base import _BaseRaw
+from ..epochs import _BaseEpochs
+from .. import Covariance, EvokedArray, Evoked, EpochsArray
+from ..io.pick import pick_types
+from .ica import _get_fast_dot
+from ..utils import logger
+from ..decoding.mixin import TransformerMixin
+from ..cov import _regularized_covariance
+from ..channels.channels import ContainsMixin
+
+
+def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
+    """Least square estimation of evoked response from data.
+
+    Parameters
+    ----------
+    data : ndarray, shape (n_channels, n_times)
+        The data from which to estimate the evoked responses.
+    events : ndarray, shape (n_events, 3)
+        The events typically returned by the read_events function.
+        If some events don't match the events of interest as specified
+        by event_id, they will be ignored.
+    event_id : dict
+        The id of the events to consider
+    tmin : float
+        Start time before event.
+    tmax : float
+        End time after event.
+    sfreq : float
+        Sampling frequency.
+
+    Returns
+    -------
+    evokeds_data : dict of ndarray
+        A dict of evoked data for each event type in event_id.
+    toeplitz : dict of ndarray
+        A dict of Toeplitz matrices, one for each event type in event_id.
+    """
+    nmin = int(tmin * sfreq)
+    nmax = int(tmax * sfreq)
+
+    window = nmax - nmin
+    n_samples = data.shape[1]
+    toeplitz_mat = dict()
+    full_toep = list()
+    for eid in event_id:
+        # select events by type
+        ix_ev = events[:, -1] == event_id[eid]
+
+        # build toeplitz matrix
+        trig = np.zeros((n_samples, 1))
+        ix_trig = (events[ix_ev, 0]) + nmin
+        trig[ix_trig] = 1
+        toep_mat = linalg.toeplitz(trig[0:window], trig)
+        toeplitz_mat[eid] = toep_mat
+        full_toep.append(toep_mat)
+
+    # Concatenate toeplitz
+    full_toep = np.concatenate(full_toep)
+
+    # least square estimation
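+    # Solving the normal equations of the linear model data ~ E @ full_toep:
+    # all_evokeds = pinv(T @ T.T) @ T @ data.T is the stacked least-squares
+    # estimate E.T, one window of samples per event type.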
+    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
+    all_evokeds = np.dot(predictor, data.T)
+    all_evokeds = np.vsplit(all_evokeds, len(event_id))
+
+    # parse evoked response
+    evoked_data = dict()
+    for idx, eid in enumerate(event_id):
+        evoked_data[eid] = all_evokeds[idx].T
+
+    return evoked_data, toeplitz_mat
+
+
+def _check_overlapp(epochs):
+    """check if events are overlapped."""
+    isi = np.diff(epochs.events[:, 0])
+    window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
+    # Events are overlapped if the minimal inter-stimulus interval is smaller
+    # than the time window.
+    return isi.min() < window
+
+
+def _construct_signal_from_epochs(epochs):
+    """Reconstruct pseudo continuous signal from epochs."""
+    start = (np.min(epochs.events[:, 0]) +
+             int(epochs.tmin * epochs.info['sfreq']))
+    stop = (np.max(epochs.events[:, 0]) +
+            int(epochs.tmax * epochs.info['sfreq']) + 1)
+
+    n_samples = stop - start
+    epochs_data = epochs.get_data()
+    n_epochs, n_channels, n_times = epochs_data.shape
+    events_pos = epochs.events[:, 0] - epochs.events[0, 0]
+
+    data = np.zeros((n_channels, n_samples))
+    for idx in range(n_epochs):
+        onset = events_pos[idx]
+        offset = onset + n_times
+        data[:, onset:offset] = epochs_data[idx]
+
+    return data
+
+
+def least_square_evoked(epochs, return_toeplitz=False):
+    """Least square estimation of evoked response from a Epochs instance.
+
+    Parameters
+    ----------
+    epochs : Epochs instance
+        An instance of Epochs.
+    return_toeplitz : bool (default False)
+        If True, also return the Toeplitz matrices.
+
+    Returns
+    -------
+    evokeds : dict of evoked instance
+        A dict of Evoked instances, one for each event type in
+        epochs.event_id.
+    toeplitz : dict of ndarray
+        If return_toeplitz is True, the Toeplitz matrix for each event
+        type in epochs.event_id.
+    """
+    if not isinstance(epochs, _BaseEpochs):
+        raise ValueError('epochs must be an instance of `mne.Epochs`')
+
+    events = epochs.events.copy()
+    events[:, 0] -= events[0, 0] + int(epochs.tmin * epochs.info['sfreq'])
+    data = _construct_signal_from_epochs(epochs)
+    evoked_data, toeplitz = _least_square_evoked(data, events, epochs.event_id,
+                                                 tmin=epochs.tmin,
+                                                 tmax=epochs.tmax,
+                                                 sfreq=epochs.info['sfreq'])
+    evokeds = dict()
+    info = cp.deepcopy(epochs.info)
+    for name, data in evoked_data.items():
+        n_events = len(events[events[:, 2] == epochs.event_id[name]])
+        evoked = EvokedArray(data, info, tmin=epochs.tmin,
+                             comment=name, nave=n_events)
+        evokeds[name] = evoked
+
+    if return_toeplitz:
+        return evokeds, toeplitz
+
+    return evokeds
+
+
+class Xdawn(TransformerMixin, ContainsMixin):
+
+    """Implementation of the Xdawn Algorithm.
+
+    Xdawn is a spatial filtering method designed to improve the signal
+    to signal + noise ratio (SSNR) of the ERP responses. Xdawn was
+    originally designed for the P300 evoked potential by enhancing the
+    target response with respect to the non-target response. This
+    implementation is a generalization to any type of ERP.
+
+    Parameters
+    ----------
+    n_components : int (default 2)
+        The number of components to decompose M/EEG signals.
+    signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)
+        (default None). The signal covariance used for whitening of the data.
+        If None, the covariance is estimated from the epochs signal.
+    correct_overlap : 'auto' or bool (default 'auto')
+        Apply correction for overlapping ERPs when estimating the evoked
+        responses. If 'auto', overlap correction is chosen based on the
+        events in epochs.events.
+    reg : float | str | None (default None)
+        If not None, allow regularization for covariance estimation.
+        If float, shrinkage covariance is used (0 <= shrinkage <= 1).
+        If str, optimal shrinkage using Ledoit-Wolf shrinkage ('ledoit_wolf')
+        or Oracle Approximating Shrinkage ('oas').
+
+    Attributes
+    ----------
+    filters_ : dict of ndarray
+        If fit, the Xdawn components used to decompose the data for each event
+        type, else empty.
+    patterns_ : dict of ndarray
+        If fit, the Xdawn patterns used to restore M/EEG signals for each event
+        type, else empty.
+    evokeds_ : dict of evoked instance
+        If fit, the evoked response for each event type.
+
+    Notes
+    -----
+    .. versionadded:: 0.10
+
+    See Also
+    --------
+    ICA
+    CSP
+
+    References
+    ----------
+    [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
+    algorithm to enhance evoked potentials: application to brain-computer
+    interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
+
+    [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
+    August). Theoretical analysis of xDAWN algorithm: application to an
+    efficient sensor selection in a P300 BCI. In Signal Processing Conference,
+    2011 19th European (pp. 1382-1386). IEEE.
+    """
+
+    def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',
+                 reg=None):
+        """init xdawn."""
+        self.n_components = n_components
+        self.signal_cov = signal_cov
+        self.reg = reg
+        self.filters_ = dict()
+        self.patterns_ = dict()
+        self.evokeds_ = dict()
+
+        if correct_overlap not in ['auto', True, False]:
+            raise ValueError('correct_overlap must be a bool or "auto"')
+        self.correct_overlap = correct_overlap
+
+    def fit(self, epochs, y=None):
+        """Fit Xdawn from epochs.
+
+        Parameters
+        ----------
+        epochs : Epochs object
+            An instance of Epochs on which the Xdawn filters will be trained.
+        y : ndarray | None (default None)
+            Not used, here for compatibility with decoding API.
+
+        Returns
+        -------
+        self : Xdawn instance
+            The Xdawn instance.
+        """
+        if self.correct_overlap == 'auto':
+            self.correct_overlap = _check_overlapp(epochs)
+
+        # Extract signal covariance
+        if self.signal_cov is None:
+            if self.correct_overlap:
+                sig_data = _construct_signal_from_epochs(epochs)
+            else:
+                sig_data = np.hstack(epochs.get_data())
+            self.signal_cov_ = _regularized_covariance(sig_data, self.reg)
+        elif isinstance(self.signal_cov, Covariance):
+            self.signal_cov_ = self.signal_cov.data
+        elif isinstance(self.signal_cov, np.ndarray):
+            self.signal_cov_ = self.signal_cov
+        else:
+            raise ValueError('signal_cov must be None, a covariance instance '
+                             'or a ndarray')
+
+        # estimates evoked covariance
+        self.evokeds_cov_ = dict()
+        if self.correct_overlap:
+            if epochs.baseline is not None:
+                raise ValueError('Baseline correction must be None if overlap '
+                                 'correction is activated')
+            evokeds, toeplitz = least_square_evoked(epochs,
+                                                    return_toeplitz=True)
+        else:
+            evokeds = dict()
+            toeplitz = dict()
+            for eid in epochs.event_id:
+                evokeds[eid] = epochs[eid].average()
+                toeplitz[eid] = 1.0
+        self.evokeds_ = evokeds
+
+        for eid in epochs.event_id:
+            data = np.dot(evokeds[eid].data, toeplitz[eid])
+            self.evokeds_cov_[eid] = _regularized_covariance(data, self.reg)
+
+        # Estimate the spatial filters
+        for eid in epochs.event_id:
+
+            if self.signal_cov_.shape != self.evokeds_cov_[eid].shape:
+                raise ValueError('Size of signal cov must be the same as the'
+                                 ' number of channels in epochs')
+
+            evals, evecs = linalg.eigh(self.evokeds_cov_[eid],
+                                       self.signal_cov_)
+            evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
+            evecs /= np.sqrt(np.sum(evecs ** 2, axis=0))
+
+            self.filters_[eid] = evecs
+            self.patterns_[eid] = linalg.inv(evecs.T)
+
+        # store some values
+        self.ch_names = epochs.ch_names
+        self.exclude = list(range(self.n_components, len(self.ch_names)))
+        self.event_id = epochs.event_id
+        return self
+
+    def transform(self, epochs):
+        """Apply Xdawn dim reduction.
+
+        Parameters
+        ----------
+        epochs : Epochs | ndarray, shape (n_epochs, n_channels, n_times)
+            Data on which Xdawn filters will be applied.
+
+        Returns
+        -------
+        X : ndarray, shape (n_epochs, n_components * event_types, n_times)
+            Spatially filtered signals.
+        """
+        if isinstance(epochs, _BaseEpochs):
+            data = epochs.get_data()
+        elif isinstance(epochs, np.ndarray):
+            data = epochs
+        else:
+            raise ValueError('Data input must be of Epoch '
+                             'type or numpy array')
+
+        # create full matrix of spatial filter
+        full_filters = list()
+        for filt in self.filters_.values():
+            full_filters.append(filt[:, 0:self.n_components])
+        full_filters = np.concatenate(full_filters, axis=1)
+
+        # Apply spatial filters
+        X = np.dot(full_filters.T, data)
+        X = X.transpose((1, 0, 2))
+        return X
+
+    def apply(self, inst, event_id=None, include=None, exclude=None):
+        """Remove selected components from the signal.
+
+        Given the unmixing matrix, transform the data,
+        zero out the excluded components, and inverse transform the data.
+        This procedure reconstructs M/EEG signals from which
+        the dynamics described by the excluded components are subtracted.
+
+        Parameters
+        ----------
+        inst : instance of Raw | Epochs | Evoked
+            The data to be processed.
+        event_id : dict | list of str | None (default None)
+            The kind of event to apply. If None, a dict of instances is
+            returned, one for each event type Xdawn has been fitted on.
+        include : array_like of int | None (default None)
+            The indices referring to columns in the unmixing matrix. The
+            components to be kept. If None, the first n_components (as defined
+            in the Xdawn constructor) will be kept.
+        exclude : array_like of int | None (default None)
+            The indices referring to columns in the unmixing matrix. The
+            components to be zeroed out. If None, all the components except
+            the first n_components will be excluded.
+
+        Returns
+        -------
+        out : dict of instances
+            A dict of instances (of the same type as the inst input), one for
+            each event type in event_id.
+        """
+        if event_id is None:
+            event_id = self.event_id
+
+        if isinstance(inst, _BaseRaw):
+            out = self._apply_raw(raw=inst, include=include, exclude=exclude,
+                                  event_id=event_id)
+        elif isinstance(inst, _BaseEpochs):
+            out = self._apply_epochs(epochs=inst, include=include,
+                                     exclude=exclude, event_id=event_id)
+        elif isinstance(inst, Evoked):
+            out = self._apply_evoked(evoked=inst, include=include,
+                                     exclude=exclude, event_id=event_id)
+        else:
+            raise ValueError('Data input must be Raw, Epochs or Evoked type')
+        return out
+
+    def _apply_raw(self, raw, include, exclude, event_id):
+        """Aux method."""
+        if not raw.preload:
+            raise ValueError('Raw data must be preloaded to apply Xdawn')
+
+        picks = pick_types(raw.info, meg=False, include=self.ch_names,
+                           exclude='bads')
+        raws = dict()
+        for eid in event_id:
+            data = raw[picks, :][0]
+
+            data = self._pick_sources(data, include, exclude, eid)
+
+            raw_r = raw.copy()
+
+            raw_r[picks, :] = data
+            raws[eid] = raw_r
+        return raws
+
+    def _apply_epochs(self, epochs, include, exclude, event_id):
+        """Aux method."""
+        if not epochs.preload:
+            raise ValueError('Epochs must be preloaded to apply Xdawn')
+
+        picks = pick_types(epochs.info, meg=False, ref_meg=False,
+                           include=self.ch_names, exclude='bads')
+
+        # special case where epochs come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Epochs don\'t match fitted data: %i channels '
+                               'fitted but %i channels supplied. \nPlease '
+                               'provide Epochs compatible with '
+                               'xdawn.ch_names' % (len(self.ch_names),
+                                                   len(picks)))
+
+        epochs_dict = dict()
+        data = np.hstack(epochs.get_data()[:, picks])
+
+        for eid in event_id:
+
+            data_r = self._pick_sources(data, include, exclude, eid)
+            data_r = np.array(np.split(data_r, len(epochs.events), 1))
+            info_r = cp.deepcopy(epochs.info)
+            epochs_r = EpochsArray(data=data_r, info=info_r,
+                                   events=epochs.events, tmin=epochs.tmin,
+                                   event_id=epochs.event_id, verbose=False)
+            epochs_r.preload = True
+            epochs_dict[eid] = epochs_r
+
+        return epochs_dict
+
+    def _apply_evoked(self, evoked, include, exclude, event_id):
+        """Aux method."""
+        picks = pick_types(evoked.info, meg=False, ref_meg=False,
+                           include=self.ch_names,
+                           exclude='bads')
+
+        # special case where evoked come picked but fit was 'unpicked'.
+        if len(picks) != len(self.ch_names):
+            raise RuntimeError('Evoked does not match fitted data: %i channels'
+                               ' fitted but %i channels supplied. \nPlease '
+                               'provide an Evoked object that\'s compatible '
+                               'with xdawn.ch_names' % (len(self.ch_names),
+                                                        len(picks)))
+
+        data = evoked.data[picks]
+        evokeds = dict()
+
+        for eid in event_id:
+
+            data_r = self._pick_sources(data, include, exclude, eid)
+            evokeds[eid] = evoked.copy()
+
+            # restore evoked
+            evokeds[eid].data[picks] = data_r
+
+        return evokeds
+
+    def _pick_sources(self, data, include, exclude, eid):
+        """Aux method."""
+        fast_dot = _get_fast_dot()
+        if exclude is None:
+            exclude = self.exclude
+        else:
+            exclude = list(set(list(self.exclude) + list(exclude)))
+
+        logger.info('Transforming to Xdawn space')
+
+        # Apply unmixing
+        sources = fast_dot(self.filters_[eid].T, data)
+
+        if include not in (None, []):
+            mask = np.ones(len(sources), dtype=np.bool)
+            mask[np.unique(include)] = False
+            sources[mask] = 0.
+            logger.info('Zeroing out %i Xdawn components' % mask.sum())
+        elif exclude not in (None, []):
+            exclude_ = np.unique(exclude)
+            sources[exclude_] = 0.
+            logger.info('Zeroing out %i Xdawn components' % len(exclude_))
+        logger.info('Inverse transforming to sensor space')
+        data = fast_dot(self.patterns_[eid], sources)
+
+        return data
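
For reference, a minimal usage sketch of the Xdawn API added above; the file
name is hypothetical and not part of the patch::

    import mne
    from mne.preprocessing import Xdawn

    epochs = mne.read_epochs('sample-epo.fif')  # hypothetical file
    # if overlap correction ends up active, the epochs must not be
    # baseline-corrected (see fit() above)
    xd = Xdawn(n_components=2, correct_overlap='auto', reg='ledoit_wolf')
    xd.fit(epochs)
    X = xd.transform(epochs)     # (n_epochs, n_components * n_event_types, n_times)
    denoised = xd.apply(epochs)  # dict of Epochs, one entry per event type
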
diff --git a/mne/proj.py b/mne/proj.py
index fda15cb..c146331 100644
--- a/mne/proj.py
+++ b/mne/proj.py
@@ -14,7 +14,7 @@ from .parallel import parallel_func
 from .cov import _check_n_samples
 from .forward import (is_fixed_orient, _subject_from_forward,
                       convert_forward_solution)
-from .source_estimate import SourceEstimate
+from .source_estimate import SourceEstimate, VolSourceEstimate
 from .io.proj import make_projector, make_eeg_average_ref_proj
 
 
@@ -31,6 +31,10 @@ def read_proj(fname):
     -------
     projs : list
         The list of projection vectors.
+
+    See Also
+    --------
+    write_proj
     """
     check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
 
@@ -51,6 +55,10 @@ def write_proj(fname, projs):
 
     projs : list
         The list of projection vectors.
+
+    See Also
+    --------
+    read_proj
     """
     check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz'))
 
@@ -63,7 +71,8 @@ def write_proj(fname, projs):
 def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, verbose=None):
     mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads')
     grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads')
-    eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude='bads')
+    eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False,
+                         exclude='bads')
 
     if (n_grad > 0) and len(grad_ind) == 0:
         logger.info("No gradiometers found. Forcing n_grad to 0")
@@ -104,7 +113,7 @@ def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, verbose=None):
 
 @verbose
 def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
-                        verbose=None):
+                        desc_prefix=None, verbose=None):
     """Compute SSP (spatial space projection) vectors on Epochs
 
     Parameters
@@ -119,6 +128,9 @@ def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
         Number of vectors for EEG channels
     n_jobs : int
         Number of jobs to use to compute covariance
+    desc_prefix : str | None
+        The description prefix to use. If None, one will be created based on
+        the event_id, tmin, and tmax.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -126,6 +138,10 @@ def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
     -------
     projs: list
         List of projection vectors
+
+    See Also
+    --------
+    compute_proj_raw, compute_proj_evoked
     """
     # compute data covariance
     data = _compute_cov_epochs(epochs, n_jobs)
@@ -136,7 +152,8 @@ def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
         event_id = str(list(event_id.values())[0])
     else:
         event_id = 'Multiple-events'
-    desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
+    if desc_prefix is None:
+        desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
     return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix)
 
 
@@ -175,6 +192,10 @@ def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, verbose=None):
     -------
     projs : list
         List of projection vectors
+
+    See Also
+    --------
+    compute_proj_raw, compute_proj_epochs
     """
     data = np.dot(evoked.data, evoked.data.T)  # compute data covariance
     desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
@@ -189,27 +210,27 @@ def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
     Parameters
     ----------
     raw : instance of Raw
-        A raw object to use the data from
+        A raw object to use the data from.
     start : float
-        Time (in sec) to start computing SSP
+        Time (in sec) to start computing SSP.
     stop : float
-        Time (in sec) to stop computing SSP
-        None will go to the end of the file
+        Time (in sec) to stop computing SSP.
+        None will go to the end of the file.
     duration : float
         Duration (in sec) to chunk data into for SSP.
         If duration is None, data will not be chunked.
     n_grad : int
-        Number of vectors for gradiometers
+        Number of vectors for gradiometers.
     n_mag : int
-        Number of vectors for magnetometers
+        Number of vectors for magnetometers.
     n_eeg : int
-        Number of vectors for EEG channels
-    reject : dict
-        Epoch rejection configuration (see Epochs)
-    flat : dict
-        Epoch flat configuration (see Epochs)
+        Number of vectors for EEG channels.
+    reject : dict | None
+        Epoch rejection configuration (see Epochs).
+    flat : dict | None
+        Epoch flat configuration (see Epochs).
     n_jobs : int
-        Number of jobs to use to compute covariance
+        Number of jobs to use to compute covariance.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -217,6 +238,10 @@ def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
     -------
     projs: list
         List of projection vectors
+
+    See Also
+    --------
+    compute_proj_epochs, compute_proj_evoked
     """
     if duration is not None:
         events = make_fixed_length_events(raw, 999, start, stop, duration)
@@ -273,17 +298,17 @@ def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
-    Return
-    ------
-    stc : SourceEstimate
-        The sensitivity map as a SourceEstimate instance for
-        visualization.
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        The sensitivity map as a SourceEstimate or VolSourceEstimate instance
+        for visualization.
     """
     # check strings
-    if not ch_type in ['eeg', 'grad', 'mag']:
+    if ch_type not in ['eeg', 'grad', 'mag']:
         raise ValueError("ch_type should be 'eeg', 'mag' or 'grad (got %s)"
                          % ch_type)
-    if not mode in ['free', 'fixed', 'ratio', 'radiality', 'angle',
+    if mode not in ['free', 'fixed', 'ratio', 'radiality', 'angle',
                     'remaining', 'dampening']:
         raise ValueError('Unknown mode type (got %s)' % mode)
 
@@ -359,9 +384,13 @@ def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
     if mode in ['fixed', 'free']:
         sensitivity_map /= np.max(sensitivity_map)
 
-    vertices = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
     subject = _subject_from_forward(fwd)
-    stc = SourceEstimate(sensitivity_map[:, np.newaxis],
-                         vertices=vertices, tmin=0, tstep=1,
-                         subject=subject)
+    if fwd['src'][0]['type'] == 'vol':  # volume source space
+        vertices = fwd['src'][0]['vertno']
+        SEClass = VolSourceEstimate
+    else:
+        vertices = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
+        SEClass = SourceEstimate
+    stc = SEClass(sensitivity_map[:, np.newaxis], vertices=vertices, tmin=0,
+                  tstep=1, subject=subject)
     return stc
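
A short sketch of how the projection helpers above fit together, including
the new desc_prefix argument; file names and event ids are hypothetical::

    import mne

    raw = mne.io.Raw('sample_raw.fif', preload=True)
    projs = mne.compute_proj_raw(raw, start=0, stop=30., duration=1.)
    mne.write_proj('sample-proj.fif', projs)

    events = mne.find_events(raw)
    epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5)
    # desc_prefix overrides the auto-generated "<event>-<tmin>-<tmax>" prefix
    projs2 = mne.compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2,
                                     desc_prefix='custom')
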
diff --git a/mne/realtime/client.py b/mne/realtime/client.py
index d82e414..e17e102 100644
--- a/mne/realtime/client.py
+++ b/mne/realtime/client.py
@@ -7,7 +7,6 @@ from __future__ import print_function
 
 import socket
 import time
-import struct
 from ..externals.six.moves import StringIO
 import threading
 
@@ -43,7 +42,7 @@ def _recv_tag_raw(sock):
     if len(s) != 16:
         raise RuntimeError('Not enough bytes received, something is wrong. '
                            'Make sure the mne_rt_server is running.')
-    tag = Tag(*struct.unpack(">iiii", s))
+    tag = Tag(*np.fromstring(s, '>i4'))
     n_received = 0
     rec_buff = [s]
     while n_received < tag.size:
@@ -150,10 +149,10 @@ class RtClient(object):
 
         buf, chunk, begin = [], '', time.time()
         while True:
-            #if we got some data, then break after wait sec
+            # if we got some data, then break after wait sec
             if buf and time.time() - begin > self._timeout:
                 break
-            #if we got no data at all, wait a little longer
+            # if we got no data at all, wait a little longer
             elif time.time() - begin > self._timeout * 2:
                 break
             try:
@@ -258,7 +257,7 @@ class RtClient(object):
         else:
             raise RuntimeError('wrong tag received')
 
-        return  client_id
+        return client_id
 
     def start_measurement(self):
         """Start the measurement"""
@@ -287,7 +286,7 @@ class RtClient(object):
                                                  args=(self, nchan))
             self._recv_thread.start()
 
-    def stop_receive_thread(self, nchan, stop_measurement=False):
+    def stop_receive_thread(self, stop_measurement=False):
         """Stop the receive thread
 
         Parameters
@@ -316,6 +315,11 @@ class RtClient(object):
 
     def unregister_receive_callback(self, callback):
         """Unregister a raw buffer receive callback
+
+        Parameters
+        ----------
+        callback : function
+            The callback to unregister.
         """
         if callback in self._recv_callbacks:
             self._recv_callbacks.remove(callback)
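
The tag-header parsing above swaps struct.unpack for NumPy. A standalone
equivalence check (a sketch, not part of the patch)::

    import struct
    import numpy as np

    s = struct.pack('>iiii', 1, 2, 3, 400)  # a 16-byte FIF tag header
    assert tuple(np.fromstring(s, '>i4')) == struct.unpack('>iiii', s)
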
diff --git a/mne/realtime/epochs.py b/mne/realtime/epochs.py
index a257a79..1bf0df7 100644
--- a/mne/realtime/epochs.py
+++ b/mne/realtime/epochs.py
@@ -10,13 +10,10 @@ import copy
 
 import numpy as np
 
-from .. import pick_channels, pick_types
+from .. import pick_channels
 from ..utils import logger, verbose
-from ..baseline import rescale
 from ..epochs import _BaseEpochs
 from ..event import _find_events
-from ..filter import detrend
-from ..io.proj import setup_proj
 
 
 class RtEpochs(_BaseEpochs):
@@ -25,16 +22,16 @@ class RtEpochs(_BaseEpochs):
     Can receive epochs in real time from an RtClient.
 
     For example, to get some epochs from a running mne_rt_server on
-    'localhost', you could use:
+    'localhost', you could use::
 
-    client = mne.realtime.RtClient('localhost')
-    event_id, tmin, tmax = 1, -0.2, 0.5
+        client = mne.realtime.RtClient('localhost')
+        event_id, tmin, tmax = 1, -0.2, 0.5
 
-    epochs = mne.realtime.RtEpochs(client, event_id, tmin, tmax)
-    epochs.start()  # start the measurement and start receiving epochs
+        epochs = mne.realtime.RtEpochs(client, event_id, tmin, tmax)
+        epochs.start()  # start the measurement and start receiving epochs
 
-    evoked_1 = epochs.average()  # computed over all epochs
-    evoked_2 = epochs[-5:].average()  # computed over the last 5 epochs
+        evoked_1 = epochs.average()  # computed over all epochs
+        evoked_2 = epochs[-5:].average()  # computed over the last 5 epochs
 
     Parameters
     ----------
@@ -54,8 +51,6 @@ class RtEpochs(_BaseEpochs):
     sleep_time : float
         Time in seconds to wait between checking for new epochs when epochs
         are requested and the receive queue is empty.
-    name : string
-        Comment that describes the Evoked data created.
     baseline : None (default) or tuple of length 2
         The time interval to apply baseline correction.
         If None do not apply it. If baseline is (a, b)
@@ -64,19 +59,24 @@ class RtEpochs(_BaseEpochs):
         and if b is None then b is set to the end of the interval.
         If baseline is equal to (None, None) all the time
         interval is used.
-    reject : dict
-        Epoch rejection parameters based on peak to peak amplitude.
+    picks : array-like of int | None (default)
+        Indices of channels to include (if None, all channels are used).
+    name : string
+        Comment that describes the Evoked data created.
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
         Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
-        If reject is None then no rejection is done.
-        Values are float. Example:
-        reject = dict(grad=4000e-13, # T / m (gradiometers)
-                      mag=4e-12, # T (magnetometers)
-                      eeg=40e-6, # uV (EEG channels)
-                      eog=250e-6 # uV (EOG channels)
-                      )
-    flat : dict
-        Epoch rejection parameters based on flatness of signal
-        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
+        If reject is None then no rejection is done. Example::
+
+            reject = dict(grad=4000e-13, # T / m (gradiometers)
+                          mag=4e-12, # T (magnetometers)
+                          eeg=40e-6, # uV (EEG channels)
+                          eog=250e-6 # uV (EOG channels))
+
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
+        are floats that set the minimum acceptable peak-to-peak amplitude.
         If flat is None then no rejection is done.
     proj : bool, optional
         Apply SSP projection vectors
@@ -104,6 +104,16 @@ class RtEpochs(_BaseEpochs):
     isi_max : float
         The maximum time in seconds between epochs. If no epoch
         arrives in the next isi_max seconds, the RtEpochs stops.
+    find_events : dict
+        The arguments to the real-time `find_events` method as a dictionary.
+        If `find_events` is None, then default values are used.
+        Valid keys are 'output' | 'consecutive' | 'min_duration' | 'mask'.
+        Example (also default values)::
+
+            find_events = dict(output='onset', consecutive='increasing',
+                               min_duration=0, mask=0)
+
+        See mne.find_events for detailed explanation of these options.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
         Defaults to client.verbose.
@@ -116,7 +126,7 @@ class RtEpochs(_BaseEpochs):
         Names of conditions corresponding to event_ids.
     ch_names : list of string
         List of channels' names.
-    events : list of tuples
+    events : array, shape (n_events, 3)
         The events associated with the epochs currently in the queue.
     verbose : bool, str, int, or None
         See above.
@@ -126,7 +136,7 @@ class RtEpochs(_BaseEpochs):
                  sleep_time=0.1, baseline=(None, 0), picks=None,
                  name='Unknown', reject=None, flat=None, proj=True,
                  decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
-                 add_eeg_ref=True, isi_max=2., verbose=None):
+                 add_eeg_ref=True, isi_max=2., find_events=None, verbose=None):
 
         info = client.get_measurement_info()
 
@@ -136,15 +146,11 @@ class RtEpochs(_BaseEpochs):
         verbose = client.verbose if verbose is None else verbose
 
         # call _BaseEpochs constructor
-        super(RtEpochs, self).__init__(info, event_id, tmin, tmax,
-                baseline=baseline, picks=picks, name=name, reject=reject,
-                flat=flat, decim=decim, reject_tmin=reject_tmin,
-                reject_tmax=reject_tmax, detrend=detrend,
-                add_eeg_ref=add_eeg_ref, verbose=verbose)
-
-        self.proj = proj
-        self._projector, self.info = setup_proj(self.info, add_eeg_ref,
-                                                activate=self.proj)
+        super(RtEpochs, self).__init__(
+            info, None, None, event_id, tmin, tmax, baseline, picks=picks,
+            name=name, reject=reject, flat=flat, decim=decim,
+            reject_tmin=reject_tmin, reject_tmax=reject_tmax, detrend=detrend,
+            add_eeg_ref=add_eeg_ref, verbose=verbose, proj=True)
 
         self._client = client
 
@@ -160,18 +166,30 @@ class RtEpochs(_BaseEpochs):
 
         self._stim_picks = stim_picks
 
+        # find_events default options
+        self._find_events_kwargs = dict(output='onset',
+                                        consecutive='increasing',
+                                        min_duration=0, mask=0)
+        # update default options if dictionary is provided
+        if find_events is not None:
+            self._find_events_kwargs.update(find_events)
+        min_samples = (self._find_events_kwargs['min_duration'] *
+                       self.info['sfreq'])
+        self._find_events_kwargs.pop('min_duration', None)
+        self._find_events_kwargs['min_samples'] = min_samples
+
         self._sleep_time = sleep_time
 
         # add calibration factors
         cals = np.zeros(self._client_info['nchan'])
         for k in range(self._client_info['nchan']):
-            cals[k] = (self._client_info['chs'][k]['range']
-                       * self._client_info['chs'][k]['cal'])
+            cals[k] = (self._client_info['chs'][k]['range'] *
+                       self._client_info['chs'][k]['cal'])
         self._cals = cals[:, None]
 
         # FIFO queues for received epochs and events
         self._epoch_queue = list()
-        self.events = list()
+        self._events = list()
 
         # variables needed for receiving raw buffers
         self._last_buffer = None
@@ -187,6 +205,11 @@ class RtEpochs(_BaseEpochs):
 
         self.isi_max = isi_max
 
+    @property
+    def events(self):
+        """The events associated with the epochs currently in the queue."""
+        return np.array(self._events)
+
     def start(self):
         """Start receiving epochs
 
@@ -200,8 +223,7 @@ class RtEpochs(_BaseEpochs):
             nchan = self._client_info['nchan']
             self._client.start_receive_thread(nchan)
             self._started = True
-
-            self._last_time = np.inf  # init delay counter. Will stop iterations
+            self._last_time = np.inf  # init delay counter. Will stop iters
 
     def stop(self, stop_receive_thread=False, stop_measurement=False):
         """Stop receiving epochs
@@ -226,6 +248,18 @@ class RtEpochs(_BaseEpochs):
 
     def next(self, return_event_id=False):
         """To make iteration over epochs easy.
+
+        Parameters
+        ----------
+        return_event_id : bool
+            If True, return both an epoch and an event_id.
+
+        Returns
+        -------
+        epoch : instance of Epochs
+            The epoch.
+        event_id : int
+            The event id. Only returned if ``return_event_id`` is ``True``.
         """
         first = True
         while True:
@@ -235,7 +269,7 @@ class RtEpochs(_BaseEpochs):
                 raise StopIteration
             if len(self._epoch_queue) > self._current:
                 epoch = self._epoch_queue[self._current]
-                event_id = self.events[self._current][-1]
+                event_id = self._events[self._current][-1]
                 self._current += 1
                 self._last_time = current_time
                 if return_event_id:
@@ -251,7 +285,7 @@ class RtEpochs(_BaseEpochs):
                 raise RuntimeError('Not enough epochs in queue and currently '
                                    'not receiving epochs, cannot get epochs!')
 
-    def _get_data_from_disk(self):
+    def _get_data(self):
         """Return the data for n_epochs epochs"""
 
         epochs = list()
@@ -266,7 +300,7 @@ class RtEpochs(_BaseEpochs):
         """Process raw buffer (callback from RtClient)
 
         Note: Do not print log messages during regular use. It will be printed
-        asynchronously which is annyoing when working in an interactive shell.
+        asynchronously which is annoying when working in an interactive shell.
 
         Parameters
         ----------
@@ -289,7 +323,8 @@ class RtEpochs(_BaseEpochs):
         # detect events
         data = np.abs(raw_buffer[self._stim_picks]).astype(np.int)
         data = np.atleast_2d(data)
-        buff_events = _find_events(data, self._first_samp, verbose=verbose)
+        buff_events = _find_events(data, self._first_samp, verbose=verbose,
+                                   **self._find_events_kwargs)
 
         events = self._event_backlog
         for event_id in self.event_id.values():
@@ -302,14 +337,14 @@ class RtEpochs(_BaseEpochs):
         event_backlog = list()
         for event_samp, event_id in events:
             epoch = None
-            if (event_samp + tmin_samp >= self._first_samp
-                    and event_samp + tmax_samp <= last_samp):
+            if (event_samp + tmin_samp >= self._first_samp and
+                    event_samp + tmax_samp <= last_samp):
                 # easy case: whole epoch is in this buffer
                 start = event_samp + tmin_samp - self._first_samp
                 stop = event_samp + tmax_samp - self._first_samp
                 epoch = raw_buffer[:, start:stop]
-            elif (event_samp + tmin_samp < self._first_samp
-                    and event_samp + tmax_samp <= last_samp):
+            elif (event_samp + tmin_samp < self._first_samp and
+                    event_samp + tmax_samp <= last_samp):
                 # have to use some samples from previous buffer
                 if self._last_buffer is None:
                     continue
@@ -318,10 +353,7 @@ class RtEpochs(_BaseEpochs):
                 epoch = np.c_[self._last_buffer[:, -n_last:],
                               raw_buffer[:, :n_this]]
             elif event_samp + tmax_samp > last_samp:
-                # we need samples from next buffer
-                if event_samp + tmin_samp < self._first_samp:
-                    raise RuntimeError('Epoch spans more than two raw '
-                                       'buffers, increase buffer size!')
+                # we need samples from the future
                 # we will process this epoch with the next buffer
                 event_backlog.append((event_samp, event_id))
             else:
@@ -332,8 +364,17 @@ class RtEpochs(_BaseEpochs):
 
         # set things up for processing of next buffer
         self._event_backlog = event_backlog
-        self._first_samp = last_samp + 1
-        self._last_buffer = raw_buffer
+        n_buffer = raw_buffer.shape[1]
+        if self._last_buffer is None:
+            self._last_buffer = raw_buffer
+            self._first_samp = last_samp + 1
+        elif self._last_buffer.shape[1] <= n_samp + n_buffer:
+            self._last_buffer = np.c_[self._last_buffer, raw_buffer]
+        else:
+            # do not increase size of _last_buffer any further
+            self._first_samp = self._first_samp + n_buffer
+            self._last_buffer[:, :-n_buffer] = self._last_buffer[:, n_buffer:]
+            self._last_buffer[:, -n_buffer:] = raw_buffer
 
     def _append_epoch_to_queue(self, epoch, event_samp, event_id):
         """Append a (raw) epoch to queue
@@ -354,23 +395,18 @@ class RtEpochs(_BaseEpochs):
         # select the channels
         epoch = epoch[self.picks, :]
 
-        # handle offset
-        if self._offset is not None:
-            epoch += self._offset
+        # Detrend, baseline correct, decimate
+        epoch = self._detrend_offset_decim(epoch, verbose='ERROR')
 
         # apply SSP
-        if self.proj and self._projector is not None:
-            epoch = np.dot(self._projector, epoch)
-
-        # Detrend, baseline correct, decimate
-        epoch = self._preprocess(epoch, verbose='ERROR')
+        epoch = self._project_epoch(epoch)
 
         # Decide if this is a good epoch
         is_good, _ = self._is_good_epoch(epoch, verbose='ERROR')
 
         if is_good:
             self._epoch_queue.append(epoch)
-            self.events.append((event_samp, 0, event_id))
+            self._events.append((event_samp, 0, event_id))
             self._n_good += 1
         else:
             self._n_bad += 1
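
The buffer handling in _process_raw_buffer above now keeps a bounded history
of past samples so that an epoch may span more than two raw buffers. A
standalone sketch of the sliding-window update, with hypothetical sizes::

    import numpy as np

    n_buf = 2                               # incoming buffer length
    history = np.arange(6.)[np.newaxis, :]  # (n_channels, n_samples) history
    incoming = np.array([[10., 11.]])

    # shift old samples left, append the new buffer at the right edge
    history[:, :-n_buf] = history[:, n_buf:]
    history[:, -n_buf:] = incoming
    print(history)  # [[ 2.  3.  4.  5. 10. 11.]]
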
diff --git a/mne/realtime/fieldtrip_client.py b/mne/realtime/fieldtrip_client.py
index bee2746..24820ea 100644
--- a/mne/realtime/fieldtrip_client.py
+++ b/mne/realtime/fieldtrip_client.py
@@ -10,7 +10,9 @@ import warnings
 import numpy as np
 
 from ..io.constants import FIFF
-from ..io.meas_info import Info
+from ..io.meas_info import _empty_info
+from ..io.pick import pick_info
+from ..epochs import EpochsArray
 from ..utils import logger
 from ..externals.FieldTrip import Client as FtClient
 
@@ -137,7 +139,7 @@ class FieldTripClient(object):
             warnings.warn('Info dictionary not provided. Trying to guess it '
                           'from FieldTrip Header object')
 
-            info = Info()  # create info dictionary
+            info = _empty_info()  # create info dictionary
 
             # modify info attributes according to the FieldTrip Header object
             info['nchan'] = self.ft_header.nChannels
@@ -167,6 +169,8 @@ class FieldTripClient(object):
                     this_info['kind'] = FIFF.FIFFV_MCG_CH
                 elif ch.startswith('EOG'):
                     this_info['kind'] = FIFF.FIFFV_EOG_CH
+                elif ch.startswith('EMG'):
+                    this_info['kind'] = FIFF.FIFFV_EMG_CH
                 elif ch.startswith('STI'):
                     this_info['kind'] = FIFF.FIFFV_STIM_CH
                 elif ch.startswith('ECG'):
@@ -179,9 +183,7 @@ class FieldTripClient(object):
                 this_info['cal'] = 1.0
 
                 this_info['ch_name'] = ch
-                this_info['coil_trans'] = None
                 this_info['loc'] = None
-                this_info['eeg_loc'] = None
 
                 if ch.startswith('EEG'):
                     this_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
@@ -192,8 +194,8 @@ class FieldTripClient(object):
 
                 if ch.startswith('MEG') and ch.endswith('1'):
                     this_info['unit'] = FIFF.FIFF_UNIT_T
-                elif ch.startswith('MEG') and (ch.endswith('2')
-                                               or ch.endswith('3')):
+                elif ch.startswith('MEG') and (ch.endswith('2') or
+                                               ch.endswith('3')):
                     this_info['unit'] = FIFF.FIFF_UNIT_T_M
                 else:
                     this_info['unit'] = FIFF.FIFF_UNIT_V
@@ -204,6 +206,16 @@ class FieldTripClient(object):
 
         else:
 
+            # XXX: the data in real-time mode and offline mode
+            # does not match unless this is done
+            self.info['projs'] = list()
+
+            # FieldTrip buffer already does the calibration
+            for this_info in self.info['chs']:
+                this_info['range'] = 1.0
+                this_info['cal'] = 1.0
+                this_info['unit_mul'] = 0
+
             info = copy.deepcopy(self.info)
 
         return info
@@ -218,6 +230,43 @@ class FieldTripClient(object):
         """
         return self.info
 
+    def get_data_as_epoch(self, n_samples=1024, picks=None):
+        """Returns last n_samples from current time.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of samples to fetch.
+        picks : array-like of int | None
+            If None, all channels are kept;
+            otherwise only the channel indices in picks are kept.
+
+        Returns
+        -------
+        epoch : instance of Epochs
+            The samples fetched as an Epochs object.
+
+        See Also
+        --------
+        Epochs.iter_evoked
+        """
+        ft_header = self.ft_client.getHeader()
+        last_samp = ft_header.nSamples - 1
+        start = last_samp - n_samples + 1
+        stop = last_samp
+        events = np.expand_dims(np.array([start, 1, 1]), axis=0)
+
+        # get the data
+        data = self.ft_client.getData([start, stop]).transpose()
+
+        # create epoch from data
+        info = self.info
+        if picks is not None:
+            info = pick_info(info, picks, copy=True)
+        epoch = EpochsArray(data[picks][np.newaxis], info, events)
+
+        return epoch
+
     def register_receive_callback(self, callback):
         """Register a raw buffer receive callback.
 
@@ -231,7 +280,13 @@ class FieldTripClient(object):
             self._recv_callbacks.append(callback)
 
     def unregister_receive_callback(self, callback):
-        """Unregister a raw buffer receive callback."""
+        """Unregister a raw buffer receive callback
+
+        Parameters
+        ----------
+        callback : callable
+            The callback to unregister.
+        """
         if callback in self._recv_callbacks:
             self._recv_callbacks.remove(callback)
 
@@ -281,7 +336,7 @@ class FieldTripClient(object):
 
         iter_times = zip(range(self.tmin_samp, self.tmax_samp,
                                self.buffer_size),
-                         range(self.tmin_samp + self.buffer_size,
+                         range(self.tmin_samp + self.buffer_size - 1,
                                self.tmax_samp, self.buffer_size))
 
         for ii, (start, stop) in enumerate(iter_times):
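
A sketch of the new get_data_as_epoch method in use; it assumes a FieldTrip
buffer is already running on localhost:1972::

    import mne
    from mne.realtime import FieldTripClient

    with FieldTripClient(host='localhost', port=1972,
                         tmax=30, wait_max=5) as client:
        info = client.get_measurement_info()
        picks = mne.pick_types(info, meg='grad', eeg=False)
        epoch = client.get_data_as_epoch(n_samples=512, picks=picks)
        evoked = epoch.average()
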
diff --git a/mne/realtime/mockclient.py b/mne/realtime/mockclient.py
index fd6e757..8795b88 100644
--- a/mne/realtime/mockclient.py
+++ b/mne/realtime/mockclient.py
@@ -12,12 +12,10 @@ from ..event import find_events
 class MockRtClient(object):
     """Mock Realtime Client
 
-    Attributes
+    Parameters
     ----------
     raw : instance of Raw object
         The raw object which simulates the RtClient
-    info : dict
-        Measurement info.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
     """
@@ -72,13 +70,10 @@ class MockRtClient(object):
             data, times = self.raw[:, start:stop]
 
             # to undo the calibration done in _process_raw_buffer
-            cals = np.zeros(self.info['nchan'])
-            for k in range(self.info['nchan']):
-                cals[k] = (self.info['chs'][k]['range']
-                           * self.info['chs'][k]['cal'])
+            cals = np.array([[self.info['chs'][k]['range'] *
+                              self.info['chs'][k]['cal'] for k in picks]]).T
 
-            self._cals = cals[:, None]
-            data[picks, :] = data[picks, :] / self._cals
+            data[picks, :] = data[picks, :] / cals
 
             epochs._process_raw_buffer(data)
 
@@ -103,6 +98,8 @@ class MockRtClient(object):
             Start time before event.
         tmax : float
             End time after event.
+        picks : array-like of int
+            Indices of channels.
         stim_channel : None | string | list of string
             Name of the stim channel or all the stim channels
             affected by the trigger. If None, the config variables
@@ -159,15 +156,33 @@ class MockRtClient(object):
             return None
 
     def register_receive_callback(self, x):
-        """API boilerplate"""
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
         pass
 
     def start_receive_thread(self, x):
-        """API boilerplate"""
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
         pass
 
     def unregister_receive_callback(self, x):
-        """API boilerplate"""
+        """API boilerplate
+
+        Parameters
+        ----------
+        x : None
+            Not used.
+        """
         pass
 
     def _stop_receive_thread(self):
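
A sketch of MockRtClient driving RtEpochs, mirroring the tests further below;
the raw file name is hypothetical::

    import mne
    from mne.realtime import MockRtClient, RtEpochs

    raw = mne.io.Raw('test_raw.fif', preload=True)
    picks = mne.pick_types(raw.info, meg='grad', stim=True,
                           exclude=raw.info['bads'])
    client = MockRtClient(raw)
    rt_epochs = RtEpochs(client, event_id=1, tmin=-0.2, tmax=0.5,
                         picks=picks, isi_max=0.5)
    rt_epochs.start()
    client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
    evoked = rt_epochs.average()
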
diff --git a/mne/realtime/stim_server_client.py b/mne/realtime/stim_server_client.py
index 57606f0..f06cf0d 100644
--- a/mne/realtime/stim_server_client.py
+++ b/mne/realtime/stim_server_client.py
@@ -28,8 +28,8 @@ class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
     def __init__(self, server_address, request_handler_class,
                  stim_server):
 
-    # Basically, this server is the same as a normal TCPServer class
-    # except that it has an additional attribute stim_server
+        # Basically, this server is the same as a normal TCPServer class
+        # except that it has an additional attribute stim_server
 
         # Create the server and bind it to the desired server address
         socketserver.TCPServer.__init__(self, server_address,
@@ -94,6 +94,10 @@ class StimServer(object):
         The port to which the stimulation server must bind to.
     n_clients : int
         The number of clients which will connect to the server.
+
+    See Also
+    --------
+    StimClient
     """
 
     def __init__(self, ip='localhost', port=4218, n_clients=1):
@@ -206,6 +210,10 @@ class StimServer(object):
             The trigger to be added to the queue for sending to StimClient.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        StimClient.get_trigger
         """
 
         for client in self._clients:
@@ -230,6 +238,10 @@ class StimClient(object):
         Communication timeout in seconds.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    StimServer
     """
 
     @verbose
@@ -260,6 +272,7 @@ class StimClient(object):
                                'computer connection (host: %s '
                                'port: %d) failed. Make sure StimServer '
                                'is running.' % (host, port))
+
     def close(self):
         """Close the socket object"""
         self._sock.close()
@@ -274,6 +287,10 @@ class StimClient(object):
             maximum time to wait for a valid trigger from the server
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        StimServer.add_trigger
         """
         start_time = time.time()  # init delay counter. Will stop iterations
 
diff --git a/mne/realtime/tests/test_fieldtrip_client.py b/mne/realtime/tests/test_fieldtrip_client.py
index 3ce945d..c17a4a5 100644
--- a/mne/realtime/tests/test_fieldtrip_client.py
+++ b/mne/realtime/tests/test_fieldtrip_client.py
@@ -9,9 +9,10 @@ import subprocess
 import warnings
 import os.path as op
 
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_equal
 
-from mne.utils import requires_neuromag2ft
+import mne
+from mne.utils import requires_neuromag2ft, run_tests_if_main
 from mne.realtime import FieldTripClient
 from mne.externals.six.moves import queue
 
@@ -22,13 +23,13 @@ warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
 def _run_buffer(kill_signal, neuromag2ft_fname):
+    # Works with neuromag2ft-3.0.2
     cmd = (neuromag2ft_fname, '--file', raw_fname, '--speed', '4.0')
 
     process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     # Let measurement continue for the entire duration
     kill_signal.get(timeout=10.0)
-    print('Terminating subprocess')
     process.terminate()
 
 
@@ -44,25 +45,45 @@ def test_fieldtrip_client():
                                                         neuromag2ft_fname))
     thread.daemon = True
     thread.start()
-
-    # Start the FieldTrip buffer
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        with FieldTripClient(host='localhost', port=1972,
-                             tmax=5, wait_max=1) as rt_client:
-            tmin_samp1 = rt_client.tmin_samp
-
-    time.sleep(1)  # Pause measurement
-    assert_true(len(w) == 1)
-
-    # Start the FieldTrip buffer again
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        with FieldTripClient(host='localhost', port=1972,
-                             tmax=5, wait_max=1) as rt_client:
-            print(rt_client.tmin_samp)
-            tmin_samp2 = rt_client.tmin_samp
-
-    kill_signal.put(False)  # stop the buffer
-    assert_true(tmin_samp2 > tmin_samp1)
-    assert_true(len(w) == 1)
+    time.sleep(0.25)
+
+    try:
+        # Start the FieldTrip buffer
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            with FieldTripClient(host='localhost', port=1972,
+                                 tmax=5, wait_max=1) as rt_client:
+                tmin_samp1 = rt_client.tmin_samp
+
+        time.sleep(1)  # Pause measurement
+        assert_true(len(w) >= 1)
+
+        # Start the FieldTrip buffer again
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            with FieldTripClient(host='localhost', port=1972,
+                                 tmax=5, wait_max=1) as rt_client:
+                raw_info = rt_client.get_measurement_info()
+
+                tmin_samp2 = rt_client.tmin_samp
+                picks = mne.pick_types(raw_info, meg='grad', eeg=False,
+                                       stim=False, eog=False)
+                epoch = rt_client.get_data_as_epoch(n_samples=5, picks=picks)
+                n_channels, n_samples = epoch.get_data().shape[1:]
+
+                epoch2 = rt_client.get_data_as_epoch(n_samples=5, picks=picks)
+                n_channels2, n_samples2 = epoch2.get_data().shape[1:]
+
+        assert_true(tmin_samp2 > tmin_samp1)
+        assert_true(len(w) >= 1)
+        assert_equal(n_samples, 5)
+        assert_equal(n_samples2, 5)
+        assert_equal(n_channels, len(picks))
+        assert_equal(n_channels2, len(picks))
+        kill_signal.put(False)  # stop the buffer
+    except:
+        kill_signal.put(False)  # stop the buffer even if tests fail
+        raise
+
+
+run_tests_if_main()
diff --git a/mne/realtime/tests/test_mockclient.py b/mne/realtime/tests/test_mockclient.py
index d5698a5..4dbb860 100644
--- a/mne/realtime/tests/test_mockclient.py
+++ b/mne/realtime/tests/test_mockclient.py
@@ -4,24 +4,24 @@ from nose.tools import assert_true
 from numpy.testing import assert_array_equal
 
 import mne
-from mne import Epochs, read_events
+from mne import Epochs, read_events, pick_channels
+from mne.utils import run_tests_if_main
 from mne.realtime import MockRtClient, RtEpochs
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
 
-raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
-
 events = read_events(event_name)
 
-picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                       stim=True, exclude=raw.info['bads'])
-
 
 def test_mockclient():
     """Test the RtMockClient."""
 
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
     event_id, tmin, tmax = 1, -0.2, 0.5
 
     epochs = Epochs(raw, events[:7], event_id=event_id, tmin=tmin, tmax=tmax,
@@ -29,7 +29,8 @@ def test_mockclient():
     data = epochs.get_data()
 
     rt_client = MockRtClient(raw)
-    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         isi_max=0.5)
 
     rt_epochs.start()
     rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
@@ -43,6 +44,10 @@ def test_mockclient():
 def test_get_event_data():
     """Test emulation of realtime data stream."""
 
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
     event_id, tmin, tmax = 2, -0.1, 0.3
     epochs = Epochs(raw, events, event_id=event_id,
                     tmin=tmin, tmax=tmax, picks=picks, baseline=None,
@@ -56,3 +61,84 @@ def test_get_event_data():
                                        stim_channel='STI 014')
 
     assert_array_equal(rt_data, data)
+
+
+def test_find_events():
+    """Test find_events in rt_epochs."""
+
+    raw = mne.io.Raw(raw_fname, preload=True, verbose=False)
+    picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
+                           stim=True, exclude=raw.info['bads'])
+
+    event_id = [0, 5, 6]
+    tmin, tmax = -0.2, 0.5
+
+    stim_channel = 'STI 014'
+    stim_channel_idx = pick_channels(raw.info['ch_names'],
+                                     include=[stim_channel])
+
+    # Reset some data for ease of comparison
+    raw._first_samps[0] = 0
+    raw.info['sfreq'] = 1000
+    # Test that we can handle consecutive events with no gap
+    raw._data[stim_channel_idx, :] = 0
+    raw._data[stim_channel_idx, 500:520] = 5
+    raw._data[stim_channel_idx, 520:530] = 6
+    raw._data[stim_channel_idx, 530:532] = 5
+    raw._data[stim_channel_idx, 540] = 6
+    raw._update_times()
+
+    # consecutive=False
+    find_events = dict(consecutive=False)
+
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 1)
+
+    # consecutive=True
+    find_events = dict(consecutive=True)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6, 5, 6]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 3)
+
+    # min_duration=0.002
+    find_events = dict(consecutive=False, min_duration=0.002)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 0)
+
+    # output='step', consecutive=True
+    find_events = dict(output='step', consecutive=True)
+    rt_client = MockRtClient(raw)
+    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
+                         stim_channel='STI 014', isi_max=0.5,
+                         find_events=find_events)
+    rt_client.send_data(rt_epochs, picks, tmin=0, tmax=10, buffer_size=1000)
+    rt_epochs.start()
+    events = [5, 6, 5, 0, 6, 0]
+    for ii, ev in enumerate(rt_epochs.iter_evoked()):
+        assert_true(ev.comment == str(events[ii]))
+    assert_true(ii == 5)
+
+run_tests_if_main()
diff --git a/mne/realtime/tests/test_stim_client_server.py b/mne/realtime/tests/test_stim_client_server.py
index 852af42..b0e5835 100644
--- a/mne/realtime/tests/test_stim_client_server.py
+++ b/mne/realtime/tests/test_stim_client_server.py
@@ -1,16 +1,22 @@
 import threading
 import time
-from nose.tools import assert_equal, assert_raises
+from nose.tools import assert_equal, assert_raises, assert_true
 
 from mne.realtime import StimServer, StimClient
 from mne.externals.six.moves import queue
-from mne.utils import requires_good_network
+from mne.utils import requires_good_network, run_tests_if_main
+
+
+_server = None
+_have_put_in_trigger = False
+_max_wait = 10.
 
 
 @requires_good_network
 def test_connection():
     """Test TCP/IP connection for StimServer <-> StimClient.
     """
+    global _server, _have_put_in_trigger
 
     # have to start a thread to simulate the effect of two
     # different computers since stim_server.start() is designed to
@@ -21,25 +27,27 @@ def test_connection():
     trig_queue2 = queue.Queue()
 
     # start a thread to emulate 1st client
-    thread1 = threading.Thread(target=connect_client, args=(trig_queue1,))
+    thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,))
     thread1.daemon = True
 
     # start another thread to emulate 2nd client
-    thread2 = threading.Thread(target=connect_client, args=(trig_queue2,))
+    thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,))
     thread2.daemon = True
 
+    thread1.start()
+    thread2.start()
     with StimServer('localhost', port=4218, n_clients=2) as stim_server:
-        thread1.start()
-        thread2.start()
-        stim_server.start(timeout=4.0)  # don't allow test to hang
+        _server = stim_server
+        stim_server.start(timeout=10.0)  # don't allow test to hang
 
         # Add the trigger to the queue for both clients
         stim_server.add_trigger(20)
+        _have_put_in_trigger = True  # monkey patch
 
         # the assert_equal must be in the test_connection() method
         # Hence communication between threads is necessary
-        trig1 = trig_queue1.get(timeout=4.0)
-        trig2 = trig_queue2.get(timeout=4.0)
+        trig1 = trig_queue1.get(timeout=_max_wait)
+        trig2 = trig_queue2.get(timeout=_max_wait)
         assert_equal(trig1, 20)
 
         # test if both clients receive the same trigger
@@ -50,18 +58,27 @@ def test_connection():
         assert_raises(StopIteration, stim_server.start, 0.1)
 
 
- at requires_good_network
-def connect_client(trig_queue):
+def _connect_client(trig_queue):
     """Helper method that instantiates the StimClient.
     """
     # just wait till the main thread reaches stim_server.start()
-    time.sleep(2.0)
+    t0 = time.time()
+    while (time.time() - t0 < _max_wait and
+           (_server is None or not _server._running)):
+        time.sleep(0.01)
+    assert_true(_server is not None and _server._running)
 
     # instantiate StimClient
     stim_client = StimClient('localhost', port=4218)
 
-    # wait a bit more for script to reach stim_server.add_trigger()
-    time.sleep(2.0)
+    # wait for script to reach stim_server.add_trigger()
+    t0 = time.time()
+    while (time.time() - t0 < _max_wait and not _have_put_in_trigger):
+        time.sleep(0.01)
+    assert_true(_have_put_in_trigger)
 
     trig_queue.put(stim_client.get_trigger())
     stim_client.close()
+
+
+run_tests_if_main()
diff --git a/mne/report.py b/mne/report.py
index fedc78e..9eabdd6 100644
--- a/mne/report.py
+++ b/mne/report.py
@@ -3,6 +3,7 @@
 
 # Authors: Alex Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Mainak Jas <mainak at neuro.hut.fi>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -15,10 +16,11 @@ import time
 from glob import glob
 import warnings
 import base64
+from datetime import datetime as dt
 
 import numpy as np
 
-from . import read_evokeds, read_events, Covariance
+from . import read_evokeds, read_events, pick_types, read_cov
 from .io import Raw, read_info
 from .utils import _TempDir, logger, verbose, get_subjects_dir
 from .viz import plot_events, plot_trans, plot_cov
@@ -28,14 +30,10 @@ from .epochs import read_epochs
 from .minimum_norm import read_inverse_operator
 from .parallel import parallel_func, check_n_jobs
 
-from .externals.decorator import decorator
 from .externals.tempita import HTMLTemplate, Template
 from .externals.six import BytesIO
 from .externals.six import moves
 
-tempdir = _TempDir()
-temp_fname = op.join(tempdir, 'test')
-
 VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
                     '-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
                     '-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
@@ -48,99 +46,117 @@ SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
 # PLOTTING FUNCTIONS
 
 
-@decorator
-def _check_report_mode(function, *args, **kwargs):
-    """Check whether to actually render or not.
-
-    Parameters
-    ----------
-    function : function
-        Function to be decorated by setting the verbosity level.
+def _fig_to_img(function=None, fig=None, image_format='png',
+                scale=None, **kwargs):
+    """Wrapper function to plot figure and create a binary image"""
 
-    Returns
-    -------
-    dec : function
-        The decorated function
-    """
-
-    if 'MNE_REPORT_TESTING' not in os.environ:
-        return function(*args, **kwargs)
-    else:
-        return ''
-
-
-@_check_report_mode
-def _fig_to_img(function=None, fig=None, close_fig=True, **kwargs):
-    """Wrapper function to plot figure and
-       for fig <-> binary image.
-    """
     import matplotlib.pyplot as plt
+    from matplotlib.figure import Figure
+    if not isinstance(fig, Figure) and function is None:
+        from scipy.misc import imread
+        mayavi = None
+        try:
+            from mayavi import mlab  # noqa, mlab imported
+            import mayavi
+        except:  # on some systems importing Mayavi raises SystemExit (!)
+            warnings.warn('Could not import mayavi. Trying to render '
+                          '`mayavi.core.scene.Scene` figure instances'
+                          ' will throw an error.')
+        tempdir = _TempDir()
+        temp_fname = op.join(tempdir, 'test')
+        if fig.scene is not None:
+            fig.scene.save_png(temp_fname)
+            img = imread(temp_fname)
+            os.remove(temp_fname)
+        else:  # Testing mode
+            img = np.zeros((2, 2, 3))
+
+        mayavi.mlab.close(fig)
+        fig = plt.figure()
+        plt.imshow(img)
+        plt.axis('off')
 
     if function is not None:
         plt.close('all')
         fig = function(**kwargs)
-
     output = BytesIO()
-    fig.savefig(output, format='png', bbox_inches='tight')
-    if close_fig is True:
-        plt.close(fig)
+    if scale is not None:
+        _scale_mpl_figure(fig, scale)
+    fig.savefig(output, format=image_format, bbox_inches='tight',
+                dpi=fig.get_dpi())
+    plt.close(fig)
+    output = output.getvalue()
+    return (output if image_format == 'svg' else
+            base64.b64encode(output).decode('ascii'))
 
-    return base64.b64encode(output.getvalue()).decode('ascii')
 
+def _scale_mpl_figure(fig, scale):
+    """Magic scaling helper
 
-@_check_report_mode
-def _fig_to_mrislice(function, orig_size, sl, **kwargs):
-    import matplotlib.pyplot as plt
-    from PIL import Image
+    Keeps font-size and artist sizes constant
+    0.5 : current font - 4pt
+    2.0 : current font + 4pt
 
+    XXX it's unclear why this works, but good to go for most cases
+    """
+    fig.set_size_inches(fig.get_size_inches() * scale)
+    fig.set_dpi(fig.get_dpi() * scale)
+    import matplotlib as mpl
+    if scale >= 1:
+        sfactor = scale ** 2
+    elif scale < 1:
+        sfactor = -((1. / scale) ** 2)
+    for text in fig.findobj(mpl.text.Text):
+        fs = text.get_fontsize()
+        new_size = fs + sfactor
+        if new_size <= 0:
+            raise ValueError('could not rescale matplotlib fonts, consider '
+                             'increasing "scale"')
+        text.set_fontsize(new_size)
+
+    fig.canvas.draw()
+
+
+def _figs_to_mrislices(sl, n_jobs, **kwargs):
+    import matplotlib.pyplot as plt
     plt.close('all')
-    fig = _plot_mri_contours(**kwargs)
-    temp_sl_fname = temp_fname + str(sl)
-
-    fig_size = fig.get_size_inches()
-    w, h = orig_size[0], orig_size[1]
-    w2, h2 = fig_size[0], fig_size[1]
-    fig.set_size_inches([(w2 / w) * w, (w2 / w) * h])
-    a = fig.gca()
-    a.set_xticks([]), a.set_yticks([])
-    plt.xlim(0, h), plt.ylim(w, 0)
-    fig.savefig(temp_sl_fname, bbox_inches='tight',
-                pad_inches=0, format='png')
-    Image.open(temp_sl_fname).resize((w, h)).save(temp_sl_fname,
-                                                  format='png')
-    output = BytesIO()
-    Image.open(temp_sl_fname).save(output, format='png')
-    return base64.b64encode(output.getvalue()).decode('ascii')
+    use_jobs = min(n_jobs, max(1, len(sl)))
+    parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
+    outs = parallel(p_fun(slices=s, **kwargs)
+                    for s in np.array_split(sl, use_jobs))
+    for o in outs[1:]:
+        outs[0] += o
+    return outs[0]
 
 
-@_check_report_mode
 def _iterate_trans_views(function, **kwargs):
     """Auxiliary function to iterate over views in trans fig.
     """
-    from PIL import Image
+    from scipy.misc import imread
     import matplotlib.pyplot as plt
     import mayavi
-
     fig = function(**kwargs)
 
-    if isinstance(fig, mayavi.core.scene.Scene):
-
-        views = [(90, 90), (0, 90), (0, -90)]
-        fig2, axes = plt.subplots(1, len(views))
-        for view, ax in zip(views, axes):
-            mayavi.mlab.view(view[0], view[1])
-            # XXX: save_bmp / save_png / ...
-            fig.scene.save_bmp(temp_fname)
-            im = Image.open(temp_fname)
-            ax.imshow(im)
-            ax.axis('off')
-
-        img = _fig_to_img(fig=fig2)
-        mayavi.mlab.close(all=True)
-
-        return img
-    else:
-        return None
+    assert isinstance(fig, mayavi.core.scene.Scene)
+
+    views = [(90, 90), (0, 90), (0, -90)]
+    fig2, axes = plt.subplots(1, len(views))
+    for view, ax in zip(views, axes):
+        mayavi.mlab.view(view[0], view[1])
+        # XXX: save_bmp / save_png / ...
+        tempdir = _TempDir()
+        temp_fname = op.join(tempdir, 'test.png')
+        if fig.scene is not None:
+            fig.scene.save_png(temp_fname)
+            im = imread(temp_fname)
+        else:  # Testing mode
+            im = np.zeros((2, 2, 3))
+        ax.imshow(im)
+        ax.axis('off')
+
+    mayavi.mlab.close(fig)
+    img = _fig_to_img(fig=fig2)
+    return img
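
The plotting helpers above now funnel every figure through `_fig_to_img`, which writes
the figure to an in-memory buffer and returns either raw SVG text or base64-encoded PNG
bytes that the templates inline as `data:` URIs; the module-level temp file is gone, and
Mayavi scenes go through a PNG round-trip first. A minimal sketch of the
save-and-encode step, assuming only Matplotlib and the standard library:

    import base64
    from io import BytesIO

    import matplotlib
    matplotlib.use('Agg')  # render off-screen
    import matplotlib.pyplot as plt

    fig = plt.figure()
    plt.plot([0, 1], [0, 1])

    buf = BytesIO()
    fig.savefig(buf, format='png', bbox_inches='tight')
    plt.close(fig)

    # base64-encode the PNG bytes so they can be inlined into HTML:
    img = base64.b64encode(buf.getvalue()).decode('ascii')
    html = '<img src="data:image/png;base64,%s">' % img
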
 
 ###############################################################################
 # TOC FUNCTIONS
@@ -150,6 +166,9 @@ def _is_bad_fname(fname):
     """Auxiliary function for identifying bad file naming patterns
        and highlighting them in red in the TOC.
     """
+    if fname.endswith('(whitened)'):
+        fname = fname[:-11]
+
     if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
         return 'red'
     else:
@@ -201,6 +220,10 @@ def _get_toc_property(fname):
         div_klass = 'mri'
         tooltip = 'MRI'
         text = 'MRI'
+    elif fname.endswith('(whitened)'):
+        div_klass = 'evoked'
+        tooltip = fname
+        text = op.basename(fname[:-11]) + '(whitened)'
     else:
         div_klass = fname.split('-#-')[1]
         tooltip = fname.split('-#-')[0]
@@ -209,10 +232,17 @@ def _get_toc_property(fname):
     return div_klass, tooltip, text
 
 
-def _iterate_files(report, fnames, info, sfreq):
+def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
     """Auxiliary function to parallel process in batch mode.
     """
     htmls, report_fnames, report_sectionlabels = [], [], []
+
+    def _update_html(html, report_fname, report_sectionlabel):
+        """Update the lists above."""
+        htmls.append(html)
+        report_fnames.append(report_fname)
+        report_sectionlabels.append(report_sectionlabel)
+
     for fname in fnames:
         logger.info("Rendering : %s"
                     % op.join('...' + report.data_path[-20:],
@@ -232,7 +262,13 @@ def _iterate_files(report, fnames, info, sfreq):
                 report_fname = fname
                 report_sectionlabel = 'inverse'
             elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
-                html = report._render_evoked(fname)
+                if cov is not None:
+                    html = report._render_whitened_evoked(fname, cov, baseline)
+                    report_fname = fname + ' (whitened)'
+                    report_sectionlabel = 'evoked'
+                    _update_html(html, report_fname, report_sectionlabel)
+
+                html = report._render_evoked(fname, baseline)
                 report_fname = fname
                 report_sectionlabel = 'evoked'
             elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
@@ -243,13 +279,13 @@ def _iterate_files(report, fnames, info, sfreq):
                 html = report._render_epochs(fname)
                 report_fname = fname
                 report_sectionlabel = 'epochs'
-            elif (fname.endswith(('-cov.fif', '-cov.fif.gz'))
-                  and report.info_fname is not None):
+            elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
+                  report.info_fname is not None):
                 html = report._render_cov(fname, info)
                 report_fname = fname
                 report_sectionlabel = 'covariance'
-            elif (fname.endswith(('-trans.fif', '-trans.fif.gz'))
-                  and report.info_fname is not None and report.subjects_dir
+            elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
+                  report.info_fname is not None and report.subjects_dir
                   is not None and report.subject is not None):
                 html = report._render_trans(fname, report.data_path, info,
                                             report.subject,
@@ -261,13 +297,14 @@ def _iterate_files(report, fnames, info, sfreq):
                 report_fname = None
                 report_sectionlabel = None
         except Exception as e:
-            logger.info(e)
+            if on_error == 'warn':
+                logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
+            elif on_error == 'raise':
+                raise
             html = None
             report_fname = None
             report_sectionlabel = None
-        htmls.append(html)
-        report_fnames.append(report_fname)
-        report_sectionlabels.append(report_sectionlabel)
+        _update_html(html, report_fname, report_sectionlabel)
 
     return htmls, report_fnames, report_sectionlabels
 
@@ -300,7 +337,7 @@ def _iterate_sagittal_slices(array, limits=None):
     """Iterate sagittal slice.
     """
     shape = array.shape[0]
-    for ind in xrange(shape):
+    for ind in range(shape):
         if limits and ind not in limits:
             continue
         yield ind, array[ind, :, :]
@@ -310,7 +347,7 @@ def _iterate_axial_slices(array, limits=None):
     """Iterate axial slice.
     """
     shape = array.shape[1]
-    for ind in xrange(shape):
+    for ind in range(shape):
         if limits and ind not in limits:
             continue
         yield ind, array[:, ind, :]
@@ -320,13 +357,14 @@ def _iterate_coronal_slices(array, limits=None):
     """Iterate coronal slice.
     """
     shape = array.shape[2]
-    for ind in xrange(shape):
+    for ind in range(shape):
         if limits and ind not in limits:
             continue
         yield ind, np.flipud(np.rot90(array[:, :, ind]))
 
 
-def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap):
+def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
+                        image_format='png'):
     """Auxiliary function for parallel processing of mri slices.
     """
     img_klass = 'slideimg-%s' % name
@@ -337,34 +375,10 @@ def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap):
     img = _build_image(data, cmap=cmap)
     first = True if ind == 0 else False
     html = _build_html_image(img, slice_id, div_klass,
-                             img_klass, caption,
-                             first)
+                             img_klass, caption, first)
     return ind, html
 
 
-def _iterate_bem_slices(name, global_id, slides_klass, orig_size,
-                        mri_fname, surf_fnames, orientation, sl):
-    """Auxiliary function for parallel processing of bem slices.
-    """
-
-    img_klass = 'slideimg-%s' % name
-    logger.info('Rendering BEM contours : orientation = %s, '
-                'slice = %d' % (orientation, sl))
-    caption = u'Slice %s %s' % (name, sl)
-    slice_id = '%s-%s-%s' % (name, global_id, sl)
-    div_klass = 'span12 %s' % slides_klass
-
-    kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames,
-                  orientation=orientation, slices=[sl],
-                  show=False)
-    img = _fig_to_mrislice(function=_plot_mri_contours,
-                           orig_size=orig_size, sl=sl, **kwargs)
-    first = True if sl == 0 else False
-    return _build_html_image(img, slice_id, div_klass,
-                             img_klass, caption,
-                             first)
-
-
 ###############################################################################
 # HTML functions
 
@@ -390,7 +404,7 @@ slider_template = HTMLTemplate(u"""
                        /*orientation: "vertical",*/
                        min: {{minvalue}},
                        max: {{maxvalue}},
-                       step: 2,
+                       step: {{step}},
                        value: {{startvalue}},
                        create: function(event, ui) {
                        $(".{{klass}}").hide();
@@ -402,16 +416,38 @@ slider_template = HTMLTemplate(u"""
                        })</script>
 """)
 
+slider_full_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{title}}</h4>
+<div class="thumbnail">
+    <ul><li class="slider">
+        <div class="row">
+            <div class="col-md-6 col-md-offset-3">
+                <div id="{{slider_id}}"></div>
+                <ul class="thumbnails">
+                    {{image_html}}
+                </ul>
+                {{html}}
+            </div>
+        </div>
+    </li></ul>
+</div>
+</li>
+""")
+
 
-def _build_html_slider(slices_range, slides_klass, slider_id):
+def _build_html_slider(slices_range, slides_klass, slider_id,
+                       start_value=None):
     """Build an html slider for a given slices range and a slices klass.
     """
-    startvalue = (slices_range[0] + slices_range[-1]) // 2 + 1
+    if start_value is None:
+        start_value = slices_range[len(slices_range) // 2]
     return slider_template.substitute(slider_id=slider_id,
                                       klass=slides_klass,
+                                      step=slices_range[1] - slices_range[0],
                                       minvalue=slices_range[0],
                                       maxvalue=slices_range[-1],
-                                      startvalue=startvalue)
+                                      startvalue=start_value)
 
 
 ###############################################################################
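
With `_build_html_slider` taking an explicit `start_value` and deriving `step` from the
spacing of the slice indices (rather than hard-coding 2), a decimated slice range maps
directly onto the jQuery UI slider options. Roughly:

    import numpy as np

    sl = np.arange(0, 60, 2)   # every 2nd slice, as produced by decim=2
    step = sl[1] - sl[0]       # -> 2, matches the decimation factor
    start = sl[len(sl) // 2]   # default handle position: the middle slice
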
@@ -424,6 +460,17 @@ header_template = Template(u"""
 {{include}}
 <script type="text/javascript">
 
+        var toggle_state = false;
+        $(document).on('keydown', function (event) {
+            if (event.which == 84){
+                if (!toggle_state)
+                    $('.has_toggle').trigger('click');
+                else if (toggle_state)
+                    $('.has_toggle').trigger('click');
+            toggle_state = !toggle_state;
+            }
+        });
+
         function togglebutton(class_name){
             $(class_name).toggle();
 
@@ -535,7 +582,8 @@ div.footer {
 
         <li class="active {{sectionvars[section]}}-btn">
            <a href="javascript:void(0)"
-           onclick="togglebutton('.{{sectionvars[section]}}')">
+           onclick="togglebutton('.{{sectionvars[section]}}')"
+           class="has_toggle">
     {{section if section != 'mri' else 'MRI'}}
            </a>
         </li>
@@ -550,18 +598,28 @@ div.footer {
 footer_template = HTMLTemplate(u"""
 </div></body>
 <div class="footer">
-        © Copyright 2012-2013, MNE Developers.
+        © Copyright 2012-{{current_year}}, MNE Developers.
       Created on {{date}}.
       Powered by <a href="http://martinos.org/mne">MNE.
 </div>
 </html>
 """)
 
+html_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+    <h4>{{caption}}</h4>
+    <div class="thumbnail">{{html}}</div>
+</li>
+""")
+
 image_template = Template(u"""
 
 {{default interactive = False}}
 {{default width = 50}}
 {{default id = False}}
+{{default image_format = 'png'}}
+{{default scale = None}}
+{{default comment = None}}
 
 <li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
 {{if not show}}style="display: none"{{endif}}>
@@ -571,8 +629,38 @@ image_template = Template(u"""
 {{endif}}
 <div class="thumbnail">
 {{if not interactive}}
-    <img alt="" style="width:{{width}}%;"
-    src="data:image/png;base64,{{img}}">
+    {{if image_format == 'png'}}
+        {{if scale is not None}}
+            <img alt="" style="width:{{width}}%;"
+             src="data:image/png;base64,{{img}}">
+        {{else}}
+            <img alt=""
+             src="data:image/png;base64,{{img}}">
+        {{endif}}
+    {{elif image_format == 'gif'}}
+        {{if scale is not None}}
+            <img alt="" style="width:{{width}}%;"
+             src="data:image/gif;base64,{{img}}">
+        {{else}}
+            <img alt=""
+             src="data:image/gif;base64,{{img}}">
+        {{endif}}
+    {{elif image_format == 'svg'}}
+        <div style="text-align:center;">
+            {{img}}
+        </div>
+    {{endif}}
+    {{if comment is not None}}
+        <br><br>
+        <div style="text-align:center;">
+            <style>
+                p.test {word-wrap: break-word;}
+            </style>
+            <p class="test">
+                {{comment}}
+            </p>
+        </div>
+    {{endif}}
 {{else}}
     <center>{{interactive}}</center>
 {{endif}}
@@ -587,6 +675,69 @@ repr_template = Template(u"""
 <hr></li>
 """)
 
+raw_template = Template(u"""
+<li class="{{div_klass}}" id="{{id}}">
+<h4>{{caption}}</h4>
+<table class="table table-hover">
+    <tr>
+        <th>Measurement date</th>
+        {{if meas_date is not None}}
+        <td>{{meas_date}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Experimenter</th>
+        {{if info['experimenter'] is not None}}
+        <td>{{info['experimenter']}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Digitized points</th>
+        {{if info['dig'] is not None}}
+        <td>{{len(info['dig'])}} points</td>
+        {{else}}
+        <td>Not available</td>
+        {{endif}}
+    </tr>
+    <tr>
+        <th>Good channels</th>
+        <td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
+            and {{n_eeg}} EEG channels</td>
+    </tr>
+    <tr>
+        <th>Bad channels</th>
+        {{if info['bads'] is not None}}
+        <td>{{', '.join(info['bads'])}}</td>
+        {{else}}<td>None</td>{{endif}}
+    </tr>
+    <tr>
+        <th>EOG channels</th>
+        <td>{{eog}}</td>
+    </tr>
+    <tr>
+        <th>ECG channels</th>
+        <td>{{ecg}}</td>
+    </tr>
+    <tr>
+        <th>Measurement time range</th>
+        <td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
+    </tr>
+    <tr>
+        <th>Sampling frequency</th>
+        <td>{{u'%0.2f' % info['sfreq']}} Hz</td>
+    </tr>
+    <tr>
+        <th>Highpass</th>
+        <td>{{u'%0.2f' % info['highpass']}} Hz</td>
+    </tr>
+    <tr>
+        <th>Lowpass</th>
+        <td>{{u'%0.2f' % info['lowpass']}} Hz</td>
+    </tr>
+</table>
+</li>
+""")
+
+
 toc_list = Template(u"""
 <li class="{{div_klass}}">
     {{if id}}
@@ -598,6 +749,12 @@ toc_list = Template(u"""
 """)
 
 
+def _check_scale(scale):
+    """Helper to ensure valid scale value is passed"""
+    if np.isscalar(scale) and scale <= 0:
+        raise ValueError('scale must be positive, not %s' % scale)
+
+
 class Report(object):
     """Object for rendering HTML
 
@@ -612,15 +769,36 @@ class Report(object):
         Subject name.
     title : str
         Title of the report.
+    cov_fname : str
+        Name of the file containing the noise covariance.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction for evokeds.
+        If None do not apply it. If baseline is (a, b)
+        the interval is between "a (s)" and "b (s)".
+        If a is None the beginning of the data is used
+        and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) all the time
+        interval is used.
+        The baseline (a, b) includes both endpoints, i.e. all
+        timepoints t such that a <= t <= b.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Notes
+    -----
+    To toggle the show/hide state of all sections in the html report, press 't'
+
+    .. versionadded:: 0.8.0
     """
 
-    def __init__(self, info_fname=None, subjects_dir=None, subject=None,
-                 title=None, verbose=None):
+    def __init__(self, info_fname=None, subjects_dir=None,
+                 subject=None, title=None, cov_fname=None, baseline=None,
+                 verbose=None):
 
         self.info_fname = info_fname
-        self.subjects_dir = subjects_dir
+        self.cov_fname = cov_fname
+        self.baseline = baseline
+        self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
         self.subject = subject
         self.title = title
         self.verbose = verbose
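
A `Report` can now be seeded with a noise covariance and a baseline up front;
`parse_folder` (below) uses them to render whitened evokeds alongside the plain ones.
Typical construction might look like this (file names and paths are placeholders):

    from mne.report import Report

    report = Report(info_fname='sample_audvis-ave.fif',
                    subjects_dir='/path/to/subjects', subject='sample',
                    title='Sample report',
                    cov_fname='sample_audvis-cov.fif',
                    baseline=(None, 0))  # correct from start of data to t=0
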
@@ -631,8 +809,11 @@ class Report(object):
         self.sections = []  # List of sections
         self._sectionlabels = []  # Section labels
         self._sectionvars = {}  # Section variable names in js
+        # boolean to specify if sections should be ordered in natural
+        # order of processing (raw -> events ... -> inverse)
+        self._sort_sections = False
 
-        self._init_render(verbose=self.verbose)  # Initialize the renderer
+        self._init_render()  # Initialize the renderer
 
     def _get_id(self):
         """Get id of plot.
@@ -640,50 +821,314 @@ class Report(object):
         self.initial_id += 1
         return self.initial_id
 
-    def add_section(self, figs, captions, section='custom'):
+    def _validate_input(self, items, captions, section, comments=None):
+        """Validate input.
+        """
+        if not isinstance(items, (list, tuple)):
+            items = [items]
+        if not isinstance(captions, (list, tuple)):
+            captions = [captions]
+        if not isinstance(comments, (list, tuple)):
+            if comments is None:
+                comments = [comments] * len(captions)
+            else:
+                comments = [comments]
+        if len(comments) != len(items):
+            raise ValueError('Comments and report items must have the same '
+                             'length or comments should be None.')
+        elif len(captions) != len(items):
+            raise ValueError('Captions and report items must have the same '
+                             'length.')
+
+        # Book-keeping of section names
+        if section not in self.sections:
+            self.sections.append(section)
+            self._sectionvars[section] = _clean_varnames(section)
+
+        return items, captions, comments
+
+    def _add_figs_to_section(self, figs, captions, section='custom',
+                             image_format='png', scale=None, comments=None):
+        """Auxiliary method for `add_section` and `add_figs_to_section`.
+        """
+
+        figs, captions, comments = self._validate_input(figs, captions,
+                                                        section, comments)
+        _check_scale(scale)
+        for fig, caption, comment in zip(figs, captions, comments):
+            caption = 'custom plot' if caption == '' else caption
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+            img_klass = self._sectionvars[section]
+
+            img = _fig_to_img(fig=fig, scale=scale,
+                              image_format=image_format)
+            html = image_template.substitute(img=img, id=global_id,
+                                             div_klass=div_klass,
+                                             img_klass=img_klass,
+                                             caption=caption,
+                                             show=True,
+                                             image_format=image_format,
+                                             comment=comment)
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(html)
+
+    def add_figs_to_section(self, figs, captions, section='custom',
+                            scale=None, image_format='png', comments=None):
         """Append custom user-defined figures.
 
         Parameters
         ----------
-        figs : list of matplotlib.pyplot.Figure
-            A list of figures to be included in the report.
+        figs : list of figures.
+            Each figure in the list can be an instance of
+            matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
+            or np.ndarray (images read in using scipy.imread).
         captions : list of str
             A list of captions to the figures.
         section : str
             Name of the section. If section already exists, the figures
             will be appended to the end of the section
+        scale : float | None | callable
+            Scale the images maintaining the aspect ratio.
+            If None, no scaling is applied. If float, scale will determine
+            the relative scaling (might not work for scale <= 1 depending on
+            font sizes). If function, should take a figure object as input
+            parameter. Defaults to None.
+        image_format : {'png', 'svg'}
+            The image format to be used for the report. Defaults to 'png'.
+        comments : None | str | list of str
+            A string of text or a list of strings of text to be appended after
+            the figure.
         """
+        return self._add_figs_to_section(figs=figs, captions=captions,
+                                         section=section, scale=scale,
+                                         image_format=image_format,
+                                         comments=comments)
 
-        if not isinstance(figs, (list, tuple)):
-            figs = [figs]
-        if not isinstance(captions, (list, tuple)):
-            captions = [captions]
-        if not len(figs) == len(captions):
-            raise ValueError('Captions and figures must have the same length.')
-        if section not in self.sections:
-            self.sections.append(section)
-            self._sectionvars[section] = _clean_varnames(section)
+    def add_images_to_section(self, fnames, captions, scale=None,
+                              section='custom', comments=None):
+        """Append custom user-defined images.
 
-        for fig, caption in zip(figs, captions):
+        Parameters
+        ----------
+        fnames : str | list of str
+            A filename or a list of filenames from which images are read.
+            Images can be PNG, GIF or SVG.
+        captions : str | list of str
+            A caption or a list of captions to the images.
+        scale : float | None
+            Scale the images maintaining the aspect ratio.
+            Defaults to None. If None, no scaling will be applied.
+        section : str
+            Name of the section. If section already exists, the images
+            will be appended to the end of the section.
+        comments : None | str | list of str
+            A string of text or a list of strings of text to be appended after
+            the image.
+        """
+        # Note: using scipy.misc is equivalent because scipy internally
+        # imports PIL anyway. It's not possible to redirect image output
+        # to binary string using scipy.misc.
+        fnames, captions, comments = self._validate_input(fnames, captions,
+                                                          section, comments)
+        _check_scale(scale)
+
+        for fname, caption, comment in zip(fnames, captions, comments):
+            caption = 'custom plot' if caption == '' else caption
             sectionvar = self._sectionvars[section]
             global_id = self._get_id()
             div_klass = self._sectionvars[section]
             img_klass = self._sectionvars[section]
-            img = _fig_to_img(fig=fig)
+
+            image_format = os.path.splitext(fname)[1][1:]
+            image_format = image_format.lower()
+
+            if image_format not in ['png', 'gif', 'svg']:
+                raise ValueError("Unknown image format. Only 'png', 'gif' or "
+                                 "'svg' are supported. Got %s" % image_format)
+
+            # Convert image to binary string.
+            output = BytesIO()
+            with open(fname, 'rb') as f:
+                output.write(f.read())
+            img = base64.b64encode(output.getvalue()).decode('ascii')
             html = image_template.substitute(img=img, id=global_id,
+                                             image_format=image_format,
                                              div_klass=div_klass,
                                              img_klass=img_klass,
                                              caption=caption,
+                                             width=scale,
+                                             comment=comment,
                                              show=True)
             self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
             self._sectionlabels.append(sectionvar)
             self.html.append(html)
 
+    def add_htmls_to_section(self, htmls, captions, section='custom'):
+        """Append htmls to the report.
+
+        Parameters
+        ----------
+        htmls : str | list of str
+            An html str or a list of html str.
+        captions : str | list of str
+            A caption or a list of captions to the htmls.
+        section : str
+            Name of the section. If section already exists, the images
+            will be appended to the end of the section.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        htmls, captions, _ = self._validate_input(htmls, captions, section)
+        for html, caption in zip(htmls, captions):
+            caption = 'custom plot' if caption == '' else caption
+            sectionvar = self._sectionvars[section]
+            global_id = self._get_id()
+            div_klass = self._sectionvars[section]
+
+            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
+            self._sectionlabels.append(sectionvar)
+            self.html.append(
+                html_template.substitute(div_klass=div_klass, id=global_id,
+                                         caption=caption, html=html))
+
+    def add_bem_to_section(self, subject, caption='BEM', section='bem',
+                           decim=2, n_jobs=1, subjects_dir=None):
+        """Renders a bem slider html str.
+
+        Parameters
+        ----------
+        subject : str
+            Subject name.
+        caption : str
+            A caption for the bem.
+        section : str
+            Name of the section. If section already exists, the bem
+            will be appended to the end of the section.
+        decim : int
+            Use this decimation factor for generating MRI/BEM images
+            (since it can be time consuming).
+        n_jobs : int
+          Number of jobs to run in parallel.
+        subjects_dir : str | None
+            Path to the SUBJECTS_DIR. If None, the path is obtained by using
+            the environment variable SUBJECTS_DIR.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        caption = 'custom plot' if caption == '' else caption
+        html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
+                                decim=decim, n_jobs=n_jobs, section=section,
+                                caption=caption)
+        html, caption, _ = self._validate_input(html, caption, section)
+        sectionvar = self._sectionvars[section]
+
+        self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
+        self._sectionlabels.append(sectionvar)
+        self.html.extend(html)
+
+    def add_slider_to_section(self, figs, captions=None, section='custom',
+                              title='Slider', scale=None, image_format='png'):
+        """Renders a slider of figs to the report.
+
+        Parameters
+        ----------
+        figs : list of figures.
+            Each figure in the list can be an instance of
+            matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
+            or np.ndarray (images read in using scipy.imread).
+        captions : list of str | list of float | None
+            A list of captions to the figures. If float, a str will be
+            constructed as `%f s`. If None, it will default to
+            `Data slice %d`.
+        section : str
+            Name of the section. If section already exists, the figures
+            will be appended to the end of the section.
+        title : str
+            The title of the slider.
+        scale : float | None | callable
+            Scale the images maintaining the aspect ratio.
+            If None, no scaling is applied. If float, scale will determine
+            the relative scaling (might not work for scale <= 1 depending on
+            font sizes). If function, should take a figure object as input
+            parameter. Defaults to None.
+        image_format : {'png', 'svg'}
+            The image format to be used for the report. Defaults to 'png'.
+
+        Notes
+        -----
+        .. versionadded:: 0.10.0
+        """
+
+        _check_scale(scale)
+        if not isinstance(figs[0], list):
+            figs = [figs]
+        else:
+            raise NotImplementedError('`add_slider_to_section` '
+                                      'can only add one slider at a time.')
+        figs, _, _ = self._validate_input(figs, section, section)
+
+        sectionvar = self._sectionvars[section]
+        self._sectionlabels.append(sectionvar)
+        global_id = self._get_id()
+        img_klass = self._sectionvars[section]
+        name = 'slider'
+
+        html = []
+        slides_klass = '%s-%s' % (name, global_id)
+        div_klass = 'span12 %s' % slides_klass
+
+        if isinstance(figs[0], list):
+            figs = figs[0]
+        sl = np.arange(0, len(figs))
+        slices = []
+        img_klass = 'slideimg-%s' % name
+
+        if captions is None:
+            captions = ['Data slice %d' % ii for ii in sl]
+        elif isinstance(captions, (list, tuple, np.ndarray)):
+            if len(figs) != len(captions):
+                raise ValueError('Captions must be the same length as the '
+                                 'number of slides.')
+            if isinstance(captions[0], (float, int)):
+                captions = ['%0.3f s' % caption for caption in captions]
+        else:
+            raise TypeError('Captions must be None or an iterable of '
+                            'float, int or str. Got %s' % type(captions))
+        for ii, (fig, caption) in enumerate(zip(figs, captions)):
+            img = _fig_to_img(fig=fig, scale=scale, image_format=image_format)
+            slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
+            first = True if ii == 0 else False
+            slices.append(_build_html_image(img, slice_id, div_klass,
+                          img_klass, caption, first))
+        # Render the slider
+        slider_id = 'select-%s-%s' % (name, global_id)
+        # Render the slices
+        image_html = u'\n'.join(slices)
+        html.append(_build_html_slider(sl, slides_klass, slider_id,
+                                       start_value=0))
+        html = '\n'.join(html)
+
+        slider_klass = sectionvar
+        self.html.append(
+            slider_full_template.substitute(id=global_id, title=title,
+                                            div_klass=slider_klass,
+                                            slider_id=slider_id, html=html,
+                                            image_html=image_html))
+
+        self.fnames.append('%s-#-%s-#-custom' % (section, sectionvar))
+
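
The new `add_figs_to_section`, `add_images_to_section`, `add_htmls_to_section` and
`add_slider_to_section` methods above all funnel through `_validate_input`, so each
accepts a single item or a matched-length list of items and captions. A sketch of how
they might be combined (figure contents and file names are placeholders):

    import matplotlib.pyplot as plt

    fig = plt.figure()
    plt.plot([1, 2, 3])

    report.add_figs_to_section(fig, captions='A custom figure',
                               section='custom', comments='appended by hand')
    report.add_images_to_section('screenshot.png', captions='A saved image')
    report.add_htmls_to_section('<b>free-form HTML</b>', captions='Notes')

    # a slider cycling through several figures of the same kind:
    figs = [plt.figure() for _ in range(5)]
    report.add_slider_to_section(figs, section='custom', title='Slider demo')
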
     ###########################################################################
     # HTML rendering
-    def _render_one_axe(self, slices_iter, name, global_id=None, cmap='gray',
-                        n_jobs=1):
-        """Render one axe of the array.
+    def _render_one_axis(self, slices_iter, name, global_id, cmap,
+                         n_elements, n_jobs):
+        """Render one axis of the array.
         """
         global_id = global_id or name
         html = []
@@ -691,7 +1136,8 @@ class Report(object):
         html.append(u'<div class="col-xs-6 col-md-4">')
         slides_klass = '%s-%s' % (name, global_id)
 
-        parallel, p_fun, _ = parallel_func(_iterate_mri_slices, n_jobs)
+        use_jobs = min(n_jobs, max(1, n_elements))
+        parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
         r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
                      for ind, data in slices_iter)
         slices_range, slices = zip(*r)
@@ -724,17 +1170,18 @@ class Report(object):
             f = open(op.join(op.dirname(__file__), 'html', inc_fname),
                      'r')
             if inc_fname.endswith('.js'):
-                include.append(u'<script type="text/javascript">'
-                               + f.read() + u'</script>')
+                include.append(u'<script type="text/javascript">' +
+                               f.read() + u'</script>')
             elif inc_fname.endswith('.css'):
-                include.append(u'<style type="text/css">'
-                               + f.read() + u'</style>')
+                include.append(u'<style type="text/css">' +
+                               f.read() + u'</style>')
             f.close()
 
         self.include = ''.join(include)
 
     @verbose
-    def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, verbose=None):
+    def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
+                     sort_sections=True, on_error='warn', verbose=None):
         """Renders all the files in the folder.
 
         Parameters
@@ -742,21 +1189,43 @@ class Report(object):
         data_path : str
             Path to the folder containing data whose HTML report will be
             created.
-        pattern : str
-            Filename pattern to include in the report. e.g., -ave.fif will
-            include all evoked files.
+        pattern : str | list of str
+            Filename pattern(s) to include in the report.
+            Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
+            files.
         n_jobs : int
           Number of jobs to run in parallel.
+        mri_decim : int
+            Use this decimation factor for generating MRI/BEM images
+            (since it can be time consuming).
+        sort_sections : bool
+            If True, sort sections in the order: raw -> events -> epochs
+            -> evoked -> covariance -> trans -> mri -> forward -> inverse.
+        on_error : str
+            What to do if a file cannot be rendered. Can be 'ignore',
+            'warn' (default), or 'raise'.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
         """
+        valid_errors = ['ignore', 'warn', 'raise']
+        if on_error not in valid_errors:
+            raise ValueError('on_error must be one of %s, not %s'
+                             % (valid_errors, on_error))
+        self._sort = sort_sections
+
         n_jobs = check_n_jobs(n_jobs)
         self.data_path = data_path
 
         if self.title is None:
             self.title = 'MNE Report for ...%s' % self.data_path[-20:]
 
-        fnames = _recursive_search(self.data_path, pattern)
+        if not isinstance(pattern, (list, tuple)):
+            pattern = [pattern]
+
+        # iterate through the possible patterns
+        fnames = list()
+        for p in pattern:
+            fnames.extend(_recursive_search(self.data_path, p))
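
`parse_folder` thus accepts a list of glob patterns, caps `n_jobs` at the number of
matched files, and lets the caller decide whether a file that fails to render warns or
raises. For example, continuing the `report` object from above (paths are placeholders):

    report.parse_folder(data_path='/path/to/study',
                        pattern=['*raw.fif', '*-ave.fif'],
                        n_jobs=2, mri_decim=4,
                        sort_sections=True, on_error='warn')
    report.save('report.html', overwrite=True)
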
 
         if self.info_fname is not None:
             info = read_info(self.info_fname)
@@ -766,10 +1235,18 @@ class Report(object):
                           '-cov.fif(.gz) and -trans.fif(.gz) files.')
             info, sfreq = None, None
 
-        # render plots in parallel
-        parallel, p_fun, _ = parallel_func(_iterate_files, n_jobs)
-        r = parallel(p_fun(self, fname, info, sfreq) for fname in
-                     np.array_split(fnames, n_jobs))
+        cov = None
+        if self.cov_fname is not None:
+            cov = read_cov(self.cov_fname)
+        baseline = self.baseline
+
+        # render plots in parallel; check that n_jobs <= # of files
+        logger.info('Iterating over %s potential files (this may take some '
+                    'time)' % len(fnames))
+        use_jobs = min(n_jobs, max(1, len(fnames)))
+        parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
+        r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
+                     for fname in np.array_split(fnames, use_jobs))
         htmls, report_fnames, report_sectionlabels = zip(*r)
 
         # combine results from n_jobs discarding plots not rendered
@@ -786,10 +1263,9 @@ class Report(object):
 
         # render mri
         if self.subjects_dir is not None and self.subject is not None:
-            self.html.append(self._render_bem(subject=self.subject,
-                                              subjects_dir=
-                                              self.subjects_dir,
-                                              n_jobs=n_jobs))
+            logger.info('Rendering BEM')
+            self.html.append(self._render_bem(self.subject, self.subjects_dir,
+                                              mri_decim, n_jobs))
             self.fnames.append('bem')
             self._sectionlabels.append('mri')
         else:
@@ -818,9 +1294,10 @@ class Report(object):
         else:
             fname = op.realpath(fname)
 
-        self._render_toc(verbose=self.verbose)
+        self._render_toc()
 
-        html = footer_template.substitute(date=time.strftime("%B %d, %Y"))
+        html = footer_template.substitute(date=time.strftime("%B %d, %Y"),
+                                          current_year=time.strftime("%Y"))
         self.html.append(html)
 
         if not overwrite and op.isfile(fname):
@@ -861,12 +1338,13 @@ class Report(object):
         global_id = 1
 
         # Reorder self.sections to reflect natural ordering
-        sections = list(set(self.sections) & set(SECTION_ORDER))
-        custom = [section for section in self.sections if section
-                  not in SECTION_ORDER]
-        order = [sections.index(section) for section in SECTION_ORDER if
-                 section in sections]
-        self.sections = np.array(sections)[order].tolist() + custom
+        if self._sort_sections:
+            sections = list(set(self.sections) & set(SECTION_ORDER))
+            custom = [section for section in self.sections if section
+                      not in SECTION_ORDER]
+            order = [sections.index(section) for section in SECTION_ORDER if
+                     section in sections]
+            self.sections = np.array(sections)[order].tolist() + custom
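
The guarded block above keeps known sections in pipeline order and appends custom ones
at the end, and now runs only when sorting was requested. The effect of the set/index
arithmetic, in simpler terms:

    SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance',
                     'trans', 'mri', 'forward', 'inverse']
    present = ['custom', 'evoked', 'raw']  # e.g. sections encountered

    known = [s for s in SECTION_ORDER if s in present]       # ['raw', 'evoked']
    custom = [s for s in present if s not in SECTION_ORDER]  # ['custom']
    assert known + custom == ['raw', 'evoked', 'custom']
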
 
         # Sort by section
         html, fnames, sectionlabels = [], [], []
@@ -883,25 +1361,23 @@ class Report(object):
                     div_klass, tooltip, text = _get_toc_property(fname)
 
                     # loop through conditions for evoked
-                    if fname.endswith(('-ave.fif', '-ave.fif.gz')):
-                       # XXX: remove redundant read_evokeds
+                    if fname.endswith(('-ave.fif', '-ave.fif.gz',
+                                      '(whitened)')):
+                        text = os.path.basename(fname)
+                        if fname.endswith('(whitened)'):
+                            fname = fname[:-11]
+                        # XXX: remove redundant read_evokeds
                         evokeds = read_evokeds(fname, verbose=False)
 
-                        html_toc += toc_list.substitute(div_klass=div_klass,
-                                                        id=None, tooltip=fname,
-                                                        color='#428bca',
-                                                        text=
-                                                        os.path.basename(fname)
-                                                        )
+                        html_toc += toc_list.substitute(
+                            div_klass=div_klass, id=None, tooltip=fname,
+                            color='#428bca', text=text)
 
                         html_toc += u'<li class="evoked"><ul>'
                         for ev in evokeds:
-                            html_toc += toc_list.substitute(div_klass=
-                                                            div_klass,
-                                                            id=global_id,
-                                                            tooltip=fname,
-                                                            color=color,
-                                                            text=ev.comment)
+                            html_toc += toc_list.substitute(
+                                div_klass=div_klass, id=global_id,
+                                tooltip=fname, color=color, text=ev.comment)
                             global_id += 1
                         html_toc += u'</ul></li>'
 
@@ -940,47 +1416,53 @@ class Report(object):
         axial_limit = limits.get('axial')
         axial_slices_gen = _iterate_axial_slices(array, axial_limit)
         html.append(
-            self._render_one_axe(axial_slices_gen, 'axial', global_id, cmap,
-                                 n_jobs=n_jobs))
+            self._render_one_axis(axial_slices_gen, 'axial',
+                                  global_id, cmap, array.shape[1], n_jobs))
         # Sagittal
         sagittal_limit = limits.get('sagittal')
         sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
-        html.append(self._render_one_axe(sagittal_slices_gen, 'sagittal',
-                    global_id, cmap, n_jobs=n_jobs))
+        html.append(
+            self._render_one_axis(sagittal_slices_gen, 'sagittal',
+                                  global_id, cmap, array.shape[1], n_jobs))
         html.append(u'</div>')
         html.append(u'<div class="row">')
         # Coronal
         coronal_limit = limits.get('coronal')
         coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
         html.append(
-            self._render_one_axe(coronal_slices_gen, 'coronal',
-                                 global_id, cmap, n_jobs=n_jobs))
+            self._render_one_axis(coronal_slices_gen, 'coronal',
+                                  global_id, cmap, array.shape[1], n_jobs))
         # Close section
         html.append(u'</div>')
         return '\n'.join(html)
 
-    def _render_one_bem_axe(self, mri_fname, surf_fnames, global_id,
-                            shape, orientation='coronal', n_jobs=1):
-        """Render one axe of bem contours.
+    def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
+                             shape, orientation='coronal', decim=2, n_jobs=1):
+        """Render one axis of bem contours.
         """
-
         orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
         orientation_axis = orientation_name2axis[orientation]
         n_slices = shape[orientation_axis]
         orig_size = np.roll(shape, orientation_axis)[[1, 2]]
 
         name = orientation
-        html, img = [], []
-        slices, slices_range = [], []
+        html = []
         html.append(u'<div class="col-xs-6 col-md-4">')
         slides_klass = '%s-%s' % (name, global_id)
 
-        slices_range = range(0, n_slices, 2)
-
-        parallel, p_fun, _ = parallel_func(_iterate_bem_slices, n_jobs)
-        slices = parallel(p_fun(name, global_id, slides_klass, orig_size,
-                          mri_fname, surf_fnames, orientation, sl)
-                          for sl in slices_range)
+        sl = np.arange(0, n_slices, decim)
+        kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
+                      orientation=orientation, img_output=orig_size)
+        imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
+        slices = []
+        img_klass = 'slideimg-%s' % name
+        div_klass = 'span12 %s' % slides_klass
+        for ii, img in enumerate(imgs):
+            slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
+            caption = u'Slice %s %s' % (name, sl[ii])
+            first = True if ii == 0 else False
+            slices.append(_build_html_image(img, slice_id, div_klass,
+                          img_klass, caption, first))
 
         # Render the slider
         slider_id = 'select-%s-%s' % (name, global_id)
@@ -989,9 +1471,8 @@ class Report(object):
         # Render the slices
         html.append(u'\n'.join(slices))
         html.append(u'</ul>')
-        html.append(_build_html_slider(slices_range, slides_klass, slider_id))
+        html.append(_build_html_slider(sl, slides_klass, slider_id))
         html.append(u'</div>')
-
         return '\n'.join(html)
 
     def _render_image(self, image, cmap='gray', n_jobs=1):
@@ -1029,18 +1510,33 @@ class Report(object):
 
         raw = Raw(raw_fname)
 
-        repr_raw = re.sub('>', '', re.sub('<', '', repr(raw)))
-        repr_info = re.sub('\\n', '\\n</br>',
-                           re.sub('>', '',
-                                  re.sub('<', '',
-                                         repr(raw.info))))
-
-        repr_html = repr_raw + '%s<br/>%s' % (repr_raw, repr_info)
-
-        html = repr_template.substitute(div_klass=div_klass,
-                                        id=global_id,
-                                        caption=caption,
-                                        repr=repr_html)
+        n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
+        n_grad = len(pick_types(raw.info, meg='grad'))
+        n_mag = len(pick_types(raw.info, meg='mag'))
+        pick_eog = pick_types(raw.info, meg=False, eog=True)
+        if len(pick_eog) > 0:
+            eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
+        else:
+            eog = 'Not available'
+        pick_ecg = pick_types(raw.info, meg=False, ecg=True)
+        if len(pick_ecg) > 0:
+            ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
+        else:
+            ecg = 'Not available'
+        meas_date = raw.info['meas_date']
+        if meas_date is not None:
+            meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
+        tmin = raw.first_samp / raw.info['sfreq']
+        tmax = raw.last_samp / raw.info['sfreq']
+
+        html = raw_template.substitute(div_klass=div_klass,
+                                       id=global_id,
+                                       caption=caption,
+                                       info=raw.info,
+                                       meas_date=meas_date,
+                                       n_eeg=n_eeg, n_grad=n_grad,
+                                       n_mag=n_mag, eog=eog,
+                                       ecg=ecg, tmin=tmin, tmax=tmax)
         return html
 
     def _render_forward(self, fwd_fname):
@@ -1071,17 +1567,17 @@ class Report(object):
                                         repr=repr_inv)
         return html
 
-    def _render_evoked(self, evoked_fname, figsize=None):
+    def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
         """Render evoked.
         """
-        evokeds = read_evokeds(evoked_fname, verbose=False)
+        evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
 
         html = []
         for ev in evokeds:
             global_id = self._get_id()
 
             kwargs = dict(show=False)
-            img = _fig_to_img(function=ev.plot, **kwargs)
+            img = _fig_to_img(ev.plot, **kwargs)
 
             caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
             div_klass = 'evoked'
@@ -1092,10 +1588,16 @@ class Report(object):
                                                   img_klass=img_klass,
                                                   caption=caption,
                                                   show=show))
-
-            for ch_type in ['eeg', 'grad', 'mag']:
-                kwargs = dict(ch_type=ch_type, show=False)
-                img = _fig_to_img(function=ev.plot_topomap, **kwargs)
+            has_types = []
+            if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
+                has_types.append('eeg')
+            if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
+                has_types.append('grad')
+            if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
+                has_types.append('mag')
+            for ch_type in has_types:
+                kwargs.update(ch_type=ch_type)
+                img = _fig_to_img(ev.plot_topomap, **kwargs)
                 caption = u'Topomap (ch_type = %s)' % ch_type
                 html.append(image_template.substitute(img=img,
                                                       div_klass=div_klass,
@@ -1112,7 +1614,7 @@ class Report(object):
         events = read_events(eve_fname)
 
         kwargs = dict(events=events, sfreq=sfreq, show=False)
-        img = _fig_to_img(function=plot_events, **kwargs)
+        img = _fig_to_img(plot_events, **kwargs)
 
         caption = 'Events : ' + eve_fname
         div_klass = 'events'
@@ -1132,8 +1634,8 @@ class Report(object):
         global_id = self._get_id()
 
         epochs = read_epochs(epo_fname)
-        kwargs = dict(subject=self.subject, show=False, return_fig=True)
-        img = _fig_to_img(function=epochs.plot_drop_log, **kwargs)
+        kwargs = dict(subject=self.subject, show=False)
+        img = _fig_to_img(epochs.plot_drop_log, **kwargs)
         caption = 'Epochs : ' + epo_fname
         div_klass = 'epochs'
         img_klass = 'epochs'
@@ -1149,11 +1651,10 @@ class Report(object):
         """Render cov.
         """
         global_id = self._get_id()
-        cov = Covariance(cov_fname)
+        cov = read_cov(cov_fname)
         fig, _ = plot_cov(cov, info_fname, show=False)
-
         img = _fig_to_img(fig=fig)
-        caption = 'Covariance : ' + cov_fname
+        caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
         div_klass = 'covariance'
         img_klass = 'covariance'
         show = True
@@ -1164,19 +1665,50 @@ class Report(object):
                                          show=show)
         return html
 
-    def _render_trans(self, trans_fname, path, info, subject,
-                      subjects_dir):
+    def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
+        """Show whitened evoked.
+        """
+        global_id = self._get_id()
+
+        evokeds = read_evokeds(evoked_fname, verbose=False)
+
+        html = []
+        for ev in evokeds:
+
+            ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
+                              verbose=False)
+
+            global_id = self._get_id()
+
+            kwargs = dict(noise_cov=noise_cov, show=False)
+            img = _fig_to_img(ev.plot_white, **kwargs)
+
+            caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
+            div_klass = 'evoked'
+            img_klass = 'evoked'
+            show = True
+            html.append(image_template.substitute(img=img, id=global_id,
+                                                  div_klass=div_klass,
+                                                  img_klass=img_klass,
+                                                  caption=caption,
+                                                  show=show))
+        return '\n'.join(html)
+
+    def _render_trans(self, trans, path, info, subject,
+                      subjects_dir, image_format='png'):
         """Render trans.
         """
-        kwargs = dict(info=info, trans_fname=trans_fname, subject=subject,
+        kwargs = dict(info=info, trans=trans, subject=subject,
                       subjects_dir=subjects_dir)
-        img = _iterate_trans_views(function=plot_trans, **kwargs)
+        try:
+            img = _iterate_trans_views(function=plot_trans, **kwargs)
+        except IOError:
+            img = _iterate_trans_views(function=plot_trans, source='head',
+                                       **kwargs)
 
         if img is not None:
-
             global_id = self._get_id()
-
-            caption = 'Trans : ' + trans_fname
+            caption = 'Trans : ' + trans
             div_klass = 'trans'
             img_klass = 'trans'
             show = True
@@ -1188,7 +1720,8 @@ class Report(object):
                                              show=show)
             return html
 
-    def _render_bem(self, subject, subjects_dir, n_jobs=1):
+    def _render_bem(self, subject, subjects_dir, decim, n_jobs,
+                    section='mri', caption='BEM'):
         """Render mri+bem.
         """
         import nibabel as nib
@@ -1228,25 +1761,22 @@ class Report(object):
 
         global_id = self._get_id()
 
-        if 'mri' not in self.sections:
+        if section == 'mri' and 'mri' not in self.sections:
             self.sections.append('mri')
             self._sectionvars['mri'] = 'mri'
 
-        name, caption = 'BEM', 'BEM contours'
+        name = caption
 
         html += u'<li class="mri" id="%d">\n' % global_id
         html += u'<h2>%s</h2>\n' % name
         html += u'<div class="row">'
-        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
-                                         shape, orientation='axial',
-                                         n_jobs=n_jobs)
-        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
-                                         shape, orientation='sagittal',
-                                         n_jobs=n_jobs)
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'axial', decim, n_jobs)
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'sagittal', decim, n_jobs)
         html += u'</div><div class="row">'
-        html += self._render_one_bem_axe(mri_fname, surf_fnames, global_id,
-                                         shape, orientation='coronal',
-                                         n_jobs=n_jobs)
+        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
+                                          shape, 'coronal', decim, n_jobs)
         html += u'</div>'
         html += u'</li>\n'
         return ''.join(html)
@@ -1257,10 +1787,9 @@ def _clean_varnames(s):
     # Remove invalid characters
     s = re.sub('[^0-9a-zA-Z_]', '', s)
 
-    # Remove leading characters until we find a letter or underscore
-    s = re.sub('^[^a-zA-Z_]+', '', s)
-
-    return s
+    # prepend 'report_' so that the resulting JavaScript
+    # class names are always valid
+    return 'report_' + s
 
 
 def _recursive_search(path, pattern):
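The sanitizer change above is self-contained; a minimal sketch of its
behavior (the inputs are illustrative only)::

    import re

    def _clean_varnames(s):
        # Keep only alphanumerics and underscores, then prepend
        # 'report_' so the name is always a valid JavaScript class name
        s = re.sub('[^0-9a-zA-Z_]', '', s)
        return 'report_' + s

    print(_clean_varnames('raw.fif'))    # report_rawfif
    print(_clean_varnames('42-epochs'))  # report_42epochs
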
diff --git a/mne/selection.py b/mne/selection.py
index 2b9ab12..cef816a 100644
--- a/mne/selection.py
+++ b/mne/selection.py
@@ -24,9 +24,18 @@ def read_selection(name, fname=None, verbose=None):
     the file. For example, "name = ['temporal', 'Right-frontal']" will produce
     a combination of "Left-temporal", "Right-temporal", and "Right-frontal".
 
-    * The included selections are: "Vertex", "Left-temporal", "Right-temporal",
-    "Left-parietal", "Right-parietal", "Left-occipital", "Right-occipital",
-    "Left-frontal", and "Right-frontal"
+    The included selections are:
+
+        * ``Vertex``
+        * ``Left-temporal``
+        * ``Right-temporal``
+        * ``Left-parietal``
+        * ``Right-parietal``
+        * ``Left-occipital``
+        * ``Right-occipital``
+        * ``Left-frontal``
+        * ``Right-frontal``
+
 
     Parameters
     ----------
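Based on the docstring above, a minimal usage sketch (the channels returned
depend on the selection file in use)::

    from mne import read_selection

    # 'temporal' matches both 'Left-temporal' and 'Right-temporal', so this
    # combines channels from those two selections with 'Right-frontal'
    ch_names = read_selection(['temporal', 'Right-frontal'])
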
diff --git a/mne/simulation/__init__.py b/mne/simulation/__init__.py
index 86402da..081654b 100644
--- a/mne/simulation/__init__.py
+++ b/mne/simulation/__init__.py
@@ -1,6 +1,9 @@
 """Data simulation code
 """
 
-from .evoked import generate_evoked, generate_noise_evoked, add_noise_evoked
-
-from .source import select_source_in_label, generate_sparse_stc, generate_stc
+from .evoked import (generate_evoked, generate_noise_evoked, add_noise_evoked,
+                     simulate_evoked, simulate_noise_evoked)
+from .raw import simulate_raw
+from .source import (select_source_in_label, generate_sparse_stc, generate_stc,
+                     simulate_sparse_stc)
+from .metrics import source_estimate_quantification
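After this change the new entry points are all importable from the package
namespace, for example::

    from mne.simulation import (simulate_evoked, simulate_noise_evoked,
                                simulate_raw, simulate_sparse_stc,
                                source_estimate_quantification)
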
diff --git a/mne/simulation/evoked.py b/mne/simulation/evoked.py
index 79a2f3f..d349706 100644
--- a/mne/simulation/evoked.py
+++ b/mne/simulation/evoked.py
@@ -4,56 +4,134 @@
 #
 # License: BSD (3-clause)
 import copy
+import warnings
 
 import numpy as np
-from scipy import signal
 
 from ..io.pick import pick_channels_cov
-from ..utils import check_random_state
 from ..forward import apply_forward
+from ..utils import check_random_state, verbose, _time_mask, deprecated
 
 
-def generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None, tmax=None,
-                    iir_filter=None, random_state=None):
+ at deprecated('"generate_evoked" is deprecated and will be removed in '
+            'MNE-0.11. Please use simulate_evoked instead')
+def generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None,
+                    tmax=None, iir_filter=None, random_state=None,
+                    verbose=None):
     """Generate noisy evoked data
 
     Parameters
     ----------
     fwd : dict
-        a forward solution
+        a forward solution.
     stc : SourceEstimate object
-        The source time courses
-    evoked : Evoked object
-        An instance of evoked used as template
+        The source time courses.
+    evoked : None | Evoked object
+        An instance of evoked used as template.
     cov : Covariance object
         The noise covariance
     snr : float
         signal to noise ratio in dB. It corresponds to
-        10 * log10( var(signal) / var(noise) )
+        10 * log10( var(signal) / var(noise) ).
     tmin : float | None
         start of time interval to estimate SNR. If None first time point
         is used.
-    tmax : float
+    tmax : float | None
         start of time interval to estimate SNR. If None last time point
         is used.
     iir_filter : None | array
-        IIR filter coefficients (denominator) e.g. [1, -1, 0.2]
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
     random_state : None | int | np.random.RandomState
         To specify the random generator state.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     evoked : Evoked object
         The simulated evoked data
     """
-    evoked = apply_forward(fwd, stc, evoked)
-    noise = generate_noise_evoked(evoked, cov, iir_filter, random_state)
-    evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin, tmax=tmax)
+    return simulate_evoked(fwd, stc, evoked.info, cov, snr, tmin,
+                           tmax, iir_filter, random_state, verbose)
+
+
+@verbose
+def simulate_evoked(fwd, stc, info, cov, snr=3., tmin=None, tmax=None,
+                    iir_filter=None, random_state=None, verbose=None):
+    """Generate noisy evoked data
+
+    Parameters
+    ----------
+    fwd : dict
+        a forward solution.
+    stc : SourceEstimate object
+        The source time courses.
+    info : dict
+        Measurement info to generate the evoked.
+    cov : Covariance object
+        The noise covariance.
+    snr : float
+        signal to noise ratio in dB. It corresponds to
+        10 * log10( var(signal) / var(noise) ).
+    tmin : float | None
+        start of time interval to estimate SNR. If None first time point
+        is used.
+    tmax : float | None
+        start of time interval to estimate SNR. If None last time point
+        is used.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    evoked : Evoked object
+        The simulated evoked data
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    evoked = apply_forward(fwd, stc, info)
+    if snr < np.inf:
+        noise = simulate_noise_evoked(evoked, cov, iir_filter, random_state)
+        evoked_noise = add_noise_evoked(evoked, noise, snr,
+                                        tmin=tmin, tmax=tmax)
+    else:
+        evoked_noise = evoked
     return evoked_noise
 
 
-def generate_noise_evoked(evoked, noise_cov, iir_filter=None,
-                          random_state=None):
+ at deprecated('"generate_noise_evoked" is deprecated and will be removed in '
+            'MNE-0.11. Please use simulate_noise_evoked instead')
+def generate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
+    """Creates noise as a multivariate Gaussian
+
+    The spatial covariance of the noise is given from the cov matrix.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        An instance of evoked used as template.
+    cov : instance of Covariance
+        The noise covariance.
+    iir_filter : None | array
+        IIR filter coefficients (denominator as it is an AR filter).
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    noise : evoked object
+        an instance of evoked
+    """
+    return simulate_noise_evoked(evoked, cov, iir_filter, random_state)
+
+
+def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
     """Creates noise as a multivariate Gaussian
 
     The spatial covariance of the noise is given from the cov matrix.
@@ -73,17 +151,34 @@ def generate_noise_evoked(evoked, noise_cov, iir_filter=None,
     -------
     noise : evoked object
         an instance of evoked
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
     """
-    noise = copy.deepcopy(evoked)
-    noise_cov = pick_channels_cov(noise_cov, include=noise.info['ch_names'])
+    noise = evoked.copy()
+    noise.data = _generate_noise(evoked.info, cov, iir_filter, random_state,
+                                 evoked.data.shape[1])[0]
+    return noise
+
+
+def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None):
+    """Helper to create spatially colored and temporally IIR-filtered noise"""
+    from scipy.signal import lfilter
+    noise_cov = pick_channels_cov(cov, include=info['ch_names'], exclude=[])
     rng = check_random_state(random_state)
-    n_channels = np.zeros(noise.info['nchan'])
-    n_samples = evoked.data.shape[1]
-    noise.data = rng.multivariate_normal(n_channels, noise_cov.data,
-                                         n_samples).T
+    c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
+    mu_channels = np.zeros(len(c))
+    # we almost always get a positive semidefinite warning here, so squash it
+    with warnings.catch_warnings(record=True):
+        noise = rng.multivariate_normal(mu_channels, c, n_samples).T
     if iir_filter is not None:
-        noise.data = signal.lfilter([1], iir_filter, noise.data, axis=-1)
-    return noise
+        if zi is None:
+            zi = np.zeros((len(c), len(iir_filter) - 1))
+        noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
+    else:
+        zf = None
+    return noise, zf
 
 
 def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
@@ -111,15 +206,9 @@ def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
         An instance of evoked corrupted by noise
     """
     evoked = copy.deepcopy(evoked)
-    times = evoked.times
-    if tmin is None:
-        tmin = np.min(times)
-    if tmax is None:
-        tmax = np.max(times)
-    tmask = (times >= tmin) & (times <= tmax)
-    tmp = np.mean((evoked.data[:, tmask] ** 2).ravel()) / \
-                                     np.mean((noise.data ** 2).ravel())
-    tmp = 10 * np.log10(tmp)
+    tmask = _time_mask(evoked.times, tmin, tmax)
+    tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
+                        np.mean((noise.data ** 2).ravel()))
     noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
     evoked.data += noise.data
     return evoked
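A minimal sketch of the new ``simulate_evoked`` API; the file names here are
hypothetical placeholders for any matching forward solution, covariance, and
evoked template::

    import numpy as np
    import mne
    from mne.simulation import simulate_sparse_stc, simulate_evoked

    fwd = mne.read_forward_solution('sample-fwd.fif', force_fixed=True)
    cov = mne.read_cov('sample-cov.fif')
    info = mne.read_evokeds('sample-ave.fif', condition=0).info

    times = np.arange(300) / info['sfreq']
    stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)

    # snr=np.inf now skips the noise step entirely (see the branch above)
    evoked_clean = simulate_evoked(fwd, stc, info, cov, snr=np.inf)
    evoked_noisy = simulate_evoked(fwd, stc, info, cov, snr=6.,
                                   iir_filter=[1, -0.9], random_state=0)
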
diff --git a/mne/simulation/metrics.py b/mne/simulation/metrics.py
new file mode 100644
index 0000000..aede064
--- /dev/null
+++ b/mne/simulation/metrics.py
@@ -0,0 +1,68 @@
+# Authors: Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+from scipy.linalg import norm
+
+# TODO: Add more localization accuracy functions. For example, distance between
+#       true dipole position (in simulated stc) and the centroid of the
+#       estimated activity.
+
+
+def _check_stc(stc1, stc2):
+    """Helper for checking that stcs are compatible"""
+    if stc1.data.shape != stc2.data.shape:
+        raise ValueError('Data in stcs must have the same size')
+    if np.any(stc1.times != stc2.times):
+        raise ValueError('Times of two stcs must match.')
+
+
+def source_estimate_quantification(stc1, stc2, metric='rms'):
+    """Helper function to calculate matrix similarities.
+
+    Parameters
+    ----------
+    stc1 : SourceEstimate
+        First source estimate for comparison.
+    stc2 : SourceEstimate
+        Second source estimate for comparison.
+    metric : str
+        Metric to calculate, 'rms' or 'cosine'.
+
+    Returns
+    -------
+    score : float | array
+        Calculated metric.
+
+    Notes
+    -----
+    Metric calculation has multiple options:
+
+        * rms: Root mean square of the difference between stc data matrices.
+        * cosine: One minus the cosine similarity of the flattened stc data
+          matrices, so identical estimates score 0.
+
+    .. versionadded:: 0.10.0
+    """
+    known_metrics = ['rms', 'cosine']
+    if metric not in known_metrics:
+        raise ValueError('metric must be a str from the known metrics: '
+                         '"rms" or "cosine"')
+
+    # Check that the stc data have the same shape; comparing distributed
+    # and sparse estimates is not yet supported.
+    _check_stc(stc1, stc2)
+    data1, data2 = stc1.data, stc2.data
+
+    # Calculate root mean square difference between two matrices
+    if metric == 'rms':
+        score = np.sqrt(np.mean((data1 - data2) ** 2))
+
+    # Calculate correlation coefficient between matrix elements
+    elif metric == 'cosine':
+        score = 1. - (np.dot(data1.flatten(), data2.flatten()) /
+                      (norm(data1) * norm(data2)))
+    return score
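To make the 'cosine' convention concrete: the score is one minus the cosine
similarity of the flattened data, so comparing an estimate with itself gives
exactly zero::

    import numpy as np
    from scipy.linalg import norm

    a = np.random.RandomState(0).randn(4, 10)
    score = 1. - np.dot(a.ravel(), a.ravel()) / (norm(a) * norm(a))
    print(np.allclose(score, 0.))  # True
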
diff --git a/mne/simulation/raw.py b/mne/simulation/raw.py
new file mode 100644
index 0000000..39a16c7
--- /dev/null
+++ b/mne/simulation/raw.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+# Authors: Mark Wronkiewicz <wronk at uw.edu>
+#          Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import warnings
+from copy import deepcopy
+
+from .evoked import _generate_noise
+from ..event import _get_stim_channel
+from ..io.pick import pick_types, pick_info, pick_channels
+from ..source_estimate import VolSourceEstimate
+from ..cov import make_ad_hoc_cov, read_cov
+from ..bem import fit_sphere_to_headshape, make_sphere_model, read_bem_solution
+from ..io import RawArray, _BaseRaw
+from ..chpi import get_chpi_positions, _get_hpi_info
+from ..io.constants import FIFF
+from ..forward import (_magnetic_dipole_field_vec, _merge_meg_eeg_fwds,
+                       _stc_src_sel, convert_forward_solution,
+                       _prepare_for_forward, _prep_meg_channels,
+                       _compute_forwards, _to_forward_dict)
+from ..transforms import _get_mri_head_t, transform_surface_to
+from ..source_space import _ensure_src, _points_outside_surface
+from ..source_estimate import _BaseSourceEstimate
+from ..utils import logger, verbose, check_random_state
+from ..externals.six import string_types
+
+
+def _log_ch(start, info, ch):
+    """Helper to log channel information"""
+    if ch is not None:
+        extra, just, ch = ' stored on channel:', 50, info['ch_names'][ch]
+    else:
+        extra, just, ch = ' not stored', 0, ''
+    logger.info((start + extra).ljust(just) + ch)
+
+
+@verbose
+def simulate_raw(raw, stc, trans, src, bem, cov='simple',
+                 blink=False, ecg=False, chpi=False, head_pos=None,
+                 mindist=1.0, interp='cos2', iir_filter=None, n_jobs=1,
+                 random_state=None, verbose=None):
+    """Simulate raw data with head movements
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw template to use for simulation. The ``info``, ``times``,
+        and potentially ``first_samp`` properties will be used.
+    stc : instance of SourceEstimate
+        The source estimate to use to simulate data. Must have the same
+        sample rate as the raw data.
+    trans : dict | str | None
+        Either a transformation filename (usually made using mne_analyze)
+        or a transformation dict (usually loaded using read_trans()).
+        If string, an ending of `.fif` or `.fif.gz` will be assumed to
+        be in FIF format, any other ending will be assumed to be a text
+        file with a 4x4 transformation matrix (like the `--trans` MNE-C
+        option). If trans is None, an identity transform will be used.
+    src : str | instance of SourceSpaces
+        Source space corresponding to the stc. If string, should be a source
+        space filename. Can also be an instance of loaded or generated
+        SourceSpaces.
+    bem : str | dict
+        BEM solution  corresponding to the stc. If string, should be a BEM
+        solution filename (e.g., "sample-5120-5120-5120-bem-sol.fif").
+    cov : instance of Covariance | str | None
+        The sensor covariance matrix used to generate noise. If None,
+        no noise will be added. If 'simple', a basic (diagonal) ad-hoc
+        noise covariance will be used. If a string, then the covariance
+        will be loaded.
+    blink : bool
+        If true, add simulated blink artifacts. See Notes for details.
+    ecg : bool
+        If true, add simulated ECG artifacts. See Notes for details.
+    chpi : bool
+        If true, simulate continuous head position indicator information.
+        Valid cHPI information must be encoded in ``raw.info['hpi_meas']``
+        to use this option.
+
+        .. warning:: This feature is currently experimental.
+
+    head_pos : None | str | dict | tuple
+        Name of the position estimates file. Should be in the format of
+        the files produced by maxfilter. If dict, keys should
+        be the time points and entries should be 4x4 ``dev_head_t``
+        matrices. If None, the original head position (from
+        ``info['dev_head_t']``) will be used. If tuple, should have the
+        same format as data returned by `get_chpi_positions`.
+    mindist : float
+        Minimum distance between sources and the inner skull boundary
+        to use during forward calculation.
+    interp : str
+        Either 'cos2', 'linear', or 'zero', the type of forward-solution
+        interpolation to use between forward solutions at different
+        head positions.
+    iir_filter : None | array
+        IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
+    n_jobs : int
+        Number of jobs to use.
+    random_state : None | int | np.random.RandomState
+        The random generator state used for blink, ECG, and sensor
+        noise randomization.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    raw : instance of Raw
+        The simulated raw file.
+
+    Notes
+    -----
+    Events coded with the position number (starting at 1) will be stored
+    in the trigger channel (if available) at times corresponding to t=0
+    in the ``stc``.
+
+    The resulting SNR will be determined by the structure of the noise
+    covariance, the amplitudes of ``stc``, and the head position(s) provided.
+
+    The blink and ECG artifacts are generated by 1) placing impulses at
+    random times of activation, and 2) convolving with activation kernel
+    functions. In both cases, the scale-factors of the activation functions
+    (and for the resulting EOG and ECG channel traces) were chosen based on
+    visual inspection to yield amplitudes generally consistent with those
+    seen in experimental data. Noisy versions of the blink and ECG
+    activations will be stored in the first EOG and ECG channel in the
+    raw file, respectively, if they exist.
+
+    For blink artifacts:
+
+        1. Random activation times are drawn from an inhomogeneous Poisson
+           process whose blink rate oscillates between 4.5 blinks/minute
+           and 17 blinks/minute based on the low (reading) and high (resting)
+           blink rates from [1]_.
+        2. The activation kernel is a 250 ms Hanning window.
+        3. Two activated dipoles are located in the z=0 plane (in head
+           coordinates) at ±30 degrees away from the y axis (nasion).
+        4. Activations affect MEG and EEG channels.
+
+    For ECG artifacts:
+
+        1. Random inter-beat intervals are drawn from a uniform distribution
+           of times corresponding to 40 and 80 beats per minute.
+        2. The activation function is the sum of three Hanning windows with
+           varying durations and scales to make a more complex waveform.
+        3. The activated dipole is located one (estimated) head radius to
+           the left (-x) of head center and three head radii below (-z)
+           head center; this dipole is oriented in the +x direction.
+        4. Activations only affect MEG channels.
+
+    .. versionadded:: 0.10.0
+
+    References
+    ----------
+    .. [1] Bentivoglio et al. "Analysis of blink rate patterns in normal
+           subjects" Movement Disorders, 1997 Nov;12(6):1028-34.
+    """
+    if not isinstance(raw, _BaseRaw):
+        raise TypeError('raw should be an instance of Raw')
+    times, info, first_samp = raw.times, raw.info, raw.first_samp
+    raw_verbose = raw.verbose
+
+    # Check for common flag errors and try to override
+    if not isinstance(stc, _BaseSourceEstimate):
+        raise TypeError('stc must be a SourceEstimate')
+    if not np.allclose(info['sfreq'], 1. / stc.tstep):
+        raise ValueError('stc and info must have same sample rate')
+    if len(stc.times) <= 2:  # to ensure event encoding works
+        raise ValueError('stc must have at least three time points')
+
+    stim = len(pick_types(info, meg=False, stim=True)) > 0
+
+    rng = check_random_state(random_state)
+    if interp not in ('cos2', 'linear', 'zero'):
+        raise ValueError('interp must be "cos2", "linear", or "zero"')
+
+    if head_pos is None:  # use pos from file
+        dev_head_ts = [info['dev_head_t']] * 2
+        offsets = np.array([0, len(times)])
+        interp = 'zero'
+    # Use position data to simulate head movement
+    else:
+        if isinstance(head_pos, string_types):
+            head_pos = get_chpi_positions(head_pos, verbose=False)
+        if isinstance(head_pos, tuple):  # can be an already-loaded pos file
+            transs, rots, ts = head_pos
+            ts -= first_samp / info['sfreq']  # MF files need reref
+            dev_head_ts = [np.r_[np.c_[r, t[:, np.newaxis]], [[0, 0, 0, 1]]]
+                           for r, t in zip(rots, transs)]
+            del transs, rots
+        elif isinstance(head_pos, dict):
+            ts = np.array(list(head_pos.keys()), float)
+            ts.sort()
+            dev_head_ts = [head_pos[float(tt)] for tt in ts]
+        else:
+            raise TypeError('unknown head_pos type %s' % type(head_pos))
+        bad = ts < 0
+        if bad.any():
+            raise RuntimeError('All position times must be >= 0, found %s/%s'
+                               '< 0' % (bad.sum(), len(bad)))
+        bad = ts > times[-1]
+        if bad.any():
+            raise RuntimeError('All position times must be <= t_end (%0.1f '
+                               'sec), found %s/%s bad values (is this a split '
+                               'file?)' % (times[-1], bad.sum(), len(bad)))
+        if ts[0] > 0:
+            ts = np.r_[[0.], ts]
+            dev_head_ts.insert(0, info['dev_head_t']['trans'])
+        dev_head_ts = [{'trans': d, 'to': info['dev_head_t']['to'],
+                        'from': info['dev_head_t']['from']}
+                       for d in dev_head_ts]
+        if ts[-1] < times[-1]:
+            dev_head_ts.append(dev_head_ts[-1])
+            ts = np.r_[ts, [times[-1]]]
+        offsets = raw.time_as_index(ts)
+        offsets[-1] = len(times)  # fix for roundoff error
+        assert offsets[-2] != offsets[-1]
+        del ts
+
+    src = _ensure_src(src, verbose=False)
+    if isinstance(bem, string_types):
+        bem = read_bem_solution(bem, verbose=False)
+    if isinstance(cov, string_types):
+        if cov == 'simple':
+            cov = make_ad_hoc_cov(info, verbose=False)
+        else:
+            cov = read_cov(cov, verbose=False)
+    assert np.array_equal(offsets, np.unique(offsets))
+    assert len(offsets) == len(dev_head_ts)
+    approx_events = int((len(times) / info['sfreq']) /
+                        (stc.times[-1] - stc.times[0]))
+    logger.info('Provided parameters will provide approximately %s event%s'
+                % (approx_events, '' if approx_events == 1 else 's'))
+
+    # Extract necessary info
+    meeg_picks = pick_types(info, meg=True, eeg=True, exclude=[])  # for sim
+    meg_picks = pick_types(info, meg=True, eeg=False, exclude=[])  # for CHPI
+    fwd_info = pick_info(info, meeg_picks)
+    fwd_info['projs'] = []  # Ensure no 'projs' applied
+    logger.info('Setting up raw simulation: %s position%s, "%s" interpolation'
+                % (len(dev_head_ts), 's' if len(dev_head_ts) != 1 else '',
+                   interp))
+
+    verts = stc.vertices
+    verts = [verts] if isinstance(stc, VolSourceEstimate) else verts
+    src = _restrict_source_space_to(src, verts)
+
+    # array used to store result
+    raw_data = np.zeros((len(info['ch_names']), len(times)))
+
+    # figure out our cHPI, ECG, and blink dipoles
+    R, r0 = fit_sphere_to_headshape(info, verbose=False)[:2]
+    R /= 1000.
+    r0 /= 1000.
+    ecg_rr = blink_rrs = exg_bem = hpi_rrs = None
+    ecg = ecg and len(meg_picks) > 0
+    chpi = chpi and len(meg_picks) > 0
+    if chpi:
+        hpi_freqs, hpi_rrs, hpi_pick, hpi_on = _get_hpi_info(info)[:4]
+        hpi_nns = hpi_rrs / np.sqrt(np.sum(hpi_rrs * hpi_rrs,
+                                           axis=1))[:, np.newaxis]
+        # turn on cHPI in file
+        raw_data[hpi_pick, :] = hpi_on
+        _log_ch('cHPI status bits enabled and', info, hpi_pick)
+    if blink or ecg:
+        exg_bem = make_sphere_model(r0, head_radius=R,
+                                    relative_radii=(0.97, 0.98, 0.99, 1.),
+                                    sigmas=(0.33, 1.0, 0.004, 0.33),
+                                    verbose=False)
+    if blink:
+        # place dipoles 30 degrees off the y (nasion) axis in the z=0 plane
+        blink_rrs = np.array([[np.cos(np.pi / 3.), np.sin(np.pi / 3.), 0.],
+                              [-np.cos(np.pi / 3.), np.sin(np.pi / 3), 0.]])
+        blink_rrs /= np.sqrt(np.sum(blink_rrs *
+                                    blink_rrs, axis=1))[:, np.newaxis]
+        blink_rrs *= 0.96 * R
+        blink_rrs += r0
+        # oriented upward
+        blink_nns = np.array([[0., 0., 1.], [0., 0., 1.]])
+        # Blink times drawn from an inhomogeneous Poisson process
+        # by 1) creating the rate and 2) pulling random numbers
+        blink_rate = (1 + np.cos(2 * np.pi * 1. / 60. * times)) / 2.
+        blink_rate *= 12.5 / 60.
+        blink_rate += 4.5 / 60.
+        blink_data = rng.rand(len(times)) < blink_rate / info['sfreq']
+        blink_data = blink_data * (rng.rand(len(times)) + 0.5)  # varying amps
+        # Activation kernel is a simple hanning window
+        blink_kernel = np.hanning(int(0.25 * info['sfreq']))
+        blink_data = np.convolve(blink_data, blink_kernel,
+                                 'same')[np.newaxis, :] * 1e-7
+        # Add rescaled noisy data to EOG ch
+        ch = pick_types(info, meg=False, eeg=False, eog=True)
+        noise = rng.randn(blink_data.shape[1]) * 5e-6
+        if len(ch) >= 1:
+            ch = ch[-1]
+            raw_data[ch, :] = blink_data * 1e3 + noise
+        else:
+            ch = None
+        _log_ch('Blinks simulated and trace', info, ch)
+        del blink_kernel, blink_rate, noise
+    if ecg:
+        ecg_rr = np.array([[-R, 0, -3 * R]])
+        max_beats = int(np.ceil(times[-1] * 80. / 60.))
+        # activation times with intervals drawn from a uniform distribution
+        # based on activation rates between 40 and 80 beats per minute
+        cardiac_idx = np.cumsum(rng.uniform(60. / 80., 60. / 40., max_beats) *
+                                info['sfreq']).astype(int)
+        cardiac_idx = cardiac_idx[cardiac_idx < len(times)]
+        cardiac_data = np.zeros(len(times))
+        cardiac_data[cardiac_idx] = 1
+        # kernel is the sum of three hanning windows
+        cardiac_kernel = np.concatenate([
+            2 * np.hanning(int(0.04 * info['sfreq'])),
+            -0.3 * np.hanning(int(0.05 * info['sfreq'])),
+            0.2 * np.hanning(int(0.26 * info['sfreq']))], axis=-1)
+        ecg_data = np.convolve(cardiac_data, cardiac_kernel,
+                               'same')[np.newaxis, :] * 15e-8
+        # Add rescaled noisy data to ECG ch
+        ch = pick_types(info, meg=False, eeg=False, ecg=True)
+        noise = rng.randn(ecg_data.shape[1]) * 1.5e-5
+        if len(ch) >= 1:
+            ch = ch[-1]
+            raw_data[ch, :] = ecg_data * 2e3 + noise
+        else:
+            ch = None
+        _log_ch('ECG simulated and trace', info, ch)
+        del cardiac_data, cardiac_kernel, max_beats, cardiac_idx
+
+    stc_event_idx = np.argmin(np.abs(stc.times))
+    if stim:
+        event_ch = pick_channels(info['ch_names'],
+                                 _get_stim_channel(None, info))[0]
+        raw_data[event_ch, :] = 0.
+    else:
+        event_ch = None
+    _log_ch('Event information', info, event_ch)
+    used = np.zeros(len(times), bool)
+    stc_indices = np.arange(len(times)) % len(stc.times)
+    raw_data[meeg_picks, :] = 0.
+    hpi_mag = 70e-9
+    last_fwd = last_fwd_chpi = last_fwd_blink = last_fwd_ecg = src_sel = None
+    zf = None  # final filter conditions for the noise
+    # don't process these any more if no MEG present
+    for fi, (fwd, fwd_blink, fwd_ecg, fwd_chpi) in \
+        enumerate(_iter_forward_solutions(
+            fwd_info, trans, src, bem, exg_bem, dev_head_ts, mindist,
+            hpi_rrs, blink_rrs, ecg_rr, n_jobs)):
+        # must be fixed orientation
+        # XXX eventually we could speed this up by allowing the forward
+        # solution code to only compute the normal direction
+        fwd = convert_forward_solution(fwd, surf_ori=True,
+                                       force_fixed=True, verbose=False)
+        if blink:
+            fwd_blink = fwd_blink['sol']['data']
+            for ii in range(len(blink_rrs)):
+                fwd_blink[:, ii] = np.dot(fwd_blink[:, 3 * ii:3 * (ii + 1)],
+                                          blink_nns[ii])
+            fwd_blink = fwd_blink[:, :len(blink_rrs)]
+            fwd_blink = fwd_blink.sum(axis=1)[:, np.newaxis]
+        # just use one arbitrary direction
+        if ecg:
+            fwd_ecg = fwd_ecg['sol']['data'][:, [0]]
+
+        # align cHPI magnetic dipoles in approx. radial direction
+        if chpi:
+            for ii in range(len(hpi_rrs)):
+                fwd_chpi[:, ii] = np.dot(fwd_chpi[:, 3 * ii:3 * (ii + 1)],
+                                         hpi_nns[ii])
+            fwd_chpi = fwd_chpi[:, :len(hpi_rrs)].copy()
+
+        if src_sel is None:
+            src_sel = _stc_src_sel(fwd['src'], stc)
+            verts = stc.vertices
+            verts = [verts] if isinstance(stc, VolSourceEstimate) else verts
+            diff_ = sum([len(v) for v in verts]) - len(src_sel)
+            if diff_ != 0:
+                warnings.warn('%s STC vertices omitted due to fwd calculation'
+                              % (diff_,))
+        if last_fwd is None:
+            last_fwd, last_fwd_blink, last_fwd_ecg, last_fwd_chpi = \
+                fwd, fwd_blink, fwd_ecg, fwd_chpi
+            continue
+
+        # set up interpolation
+        n_pts = offsets[fi] - offsets[fi - 1]
+        if interp == 'zero':
+            interps = None
+        else:
+            if interp == 'linear':
+                interps = np.linspace(1, 0, n_pts, endpoint=False)
+            else:  # interp == 'cos2':
+                interps = np.cos(0.5 * np.pi * np.arange(n_pts) / n_pts) ** 2
+            interps = np.array([interps, 1 - interps])
+
+        assert not used[offsets[fi - 1]:offsets[fi]].any()
+        event_idxs = np.where(stc_indices[offsets[fi - 1]:offsets[fi]] ==
+                              stc_event_idx)[0] + offsets[fi - 1]
+        if stim:
+            raw_data[event_ch, event_idxs] = fi
+
+        logger.info('  Simulating data for %0.3f-%0.3f sec with %s event%s'
+                    % (tuple(offsets[fi - 1:fi + 1] / info['sfreq']) +
+                       (len(event_idxs), '' if len(event_idxs) == 1 else 's')))
+
+        # Process data in large chunks to save on memory
+        chunk_size = 10000
+        chunks = np.concatenate((np.arange(offsets[fi - 1], offsets[fi],
+                                           chunk_size), [offsets[fi]]))
+        for start, stop in zip(chunks[:-1], chunks[1:]):
+            assert stop - start <= chunk_size
+
+            used[start:stop] = True
+            if interp == 'zero':
+                this_interp = None
+            else:
+                this_interp = interps[:, start - chunks[0]:stop - chunks[0]]
+            time_sl = slice(start, stop)
+            this_t = np.arange(start, stop) / info['sfreq']
+            stc_idxs = stc_indices[time_sl]
+
+            # simulate brain data
+            raw_data[meeg_picks, time_sl] = \
+                _interp(last_fwd['sol']['data'], fwd['sol']['data'],
+                        stc.data[:, stc_idxs][src_sel], this_interp)
+
+            # add sensor noise, ECG, blink, cHPI
+            if cov is not None:
+                noise, zf = _generate_noise(fwd_info, cov, iir_filter, rng,
+                                            len(stc_idxs), zi=zf)
+                raw_data[meeg_picks, time_sl] += noise
+            if blink:
+                raw_data[meeg_picks, time_sl] += \
+                    _interp(last_fwd_blink, fwd_blink, blink_data[:, time_sl],
+                            this_interp)
+            if ecg:
+                raw_data[meg_picks, time_sl] += \
+                    _interp(last_fwd_ecg, fwd_ecg, ecg_data[:, time_sl],
+                            this_interp)
+            if chpi:
+                sinusoids = np.zeros((len(hpi_freqs), len(stc_idxs)))
+                for fidx, freq in enumerate(hpi_freqs):
+                    sinusoids[fidx] = 2 * np.pi * freq * this_t
+                    sinusoids[fidx] = hpi_mag * np.sin(sinusoids[fidx])
+                raw_data[meg_picks, time_sl] += \
+                    _interp(last_fwd_chpi, fwd_chpi, sinusoids, this_interp)
+
+        assert used[offsets[fi - 1]:offsets[fi]].all()
+
+        # prepare for next iteration
+        last_fwd, last_fwd_blink, last_fwd_ecg, last_fwd_chpi = \
+            fwd, fwd_blink, fwd_ecg, fwd_chpi
+    assert used.all()
+    raw = RawArray(raw_data, info, verbose=False)
+    raw.verbose = raw_verbose
+    logger.info('Done')
+    return raw
+
+
+def _iter_forward_solutions(info, trans, src, bem, exg_bem, dev_head_ts,
+                            mindist, hpi_rrs, blink_rrs, ecg_rrs, n_jobs):
+    """Calculate a forward solution for a subject"""
+    mri_head_t, trans = _get_mri_head_t(trans)
+    logger.info('Setting up forward solutions')
+    megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
+        update_kwargs, bem = _prepare_for_forward(
+            src, mri_head_t, info, bem, mindist, n_jobs, verbose=False)
+    del (src, mindist)
+
+    eegfwd = _compute_forwards(rr, bem, [eegels], [None],
+                               [None], ['eeg'], n_jobs, verbose=False)[0]
+    eegfwd = _to_forward_dict(eegfwd, eegnames)
+    if blink_rrs is not None:
+        eegblink = _compute_forwards(blink_rrs, exg_bem, [eegels], [None],
+                                     [None], ['eeg'], n_jobs,
+                                     verbose=False)[0]
+        eegblink = _to_forward_dict(eegblink, eegnames)
+
+    # short circuit here if there are no MEG channels (don't need to iterate)
+    if len(pick_types(info, meg=True)) == 0:
+        eegfwd.update(**update_kwargs)
+        for _ in dev_head_ts:
+            yield eegfwd, eegblink, None, None
+        return
+
+    coord_frame = FIFF.FIFFV_COORD_HEAD
+    if not bem['is_sphere']:
+        idx = np.where(np.array([s['id'] for s in bem['surfs']]) ==
+                       FIFF.FIFFV_BEM_SURF_ID_BRAIN)[0]
+        assert len(idx) == 1
+        bem_surf = transform_surface_to(bem['surfs'][idx[0]], coord_frame,
+                                        mri_head_t)
+    for ti, dev_head_t in enumerate(dev_head_ts):
+        # Could be *slightly* more efficient not to do this N times,
+        # but the cost here is tiny compared to actual fwd calculation
+        logger.info('Computing gain matrix for transform #%s/%s'
+                    % (ti + 1, len(dev_head_ts)))
+        info = deepcopy(info)
+        info['dev_head_t'] = dev_head_t
+        megcoils, compcoils, megnames, meg_info = \
+            _prep_meg_channels(info, True, [], False, verbose=False)
+
+        # Make sure our sensors are all outside our BEM
+        coil_rr = [coil['r0'] for coil in megcoils]
+        if not bem['is_sphere']:
+            outside = _points_outside_surface(coil_rr, bem_surf, n_jobs,
+                                              verbose=False)
+        else:
+            d = coil_rr - bem['r0']
+            outside = np.sqrt(np.sum(d * d, axis=1)) > bem.radius
+        if not outside.all():
+            raise RuntimeError('%s MEG sensors collided with inner skull '
+                               'surface for transform %s'
+                               % (np.sum(~outside), ti))
+
+        # Compute forward
+        megfwd = _compute_forwards(rr, bem, [megcoils], [compcoils],
+                                   [meg_info], ['meg'], n_jobs,
+                                   verbose=False)[0]
+        megfwd = _to_forward_dict(megfwd, megnames)
+        fwd = _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=False)
+        fwd.update(**update_kwargs)
+
+        fwd_blink = fwd_ecg = fwd_chpi = None
+        if blink_rrs is not None:
+            megblink = _compute_forwards(blink_rrs, exg_bem, [megcoils],
+                                         [compcoils], [meg_info], ['meg'],
+                                         n_jobs, verbose=False)[0]
+            megblink = _to_forward_dict(megblink, megnames)
+            fwd_blink = _merge_meg_eeg_fwds(megblink, eegblink, verbose=False)
+        if ecg_rrs is not None:
+            megecg = _compute_forwards(ecg_rrs, exg_bem, [megcoils],
+                                       [compcoils], [meg_info], ['meg'],
+                                       n_jobs, verbose=False)[0]
+            fwd_ecg = _to_forward_dict(megecg, megnames)
+        if hpi_rrs is not None:
+            fwd_chpi = _magnetic_dipole_field_vec(hpi_rrs, megcoils).T
+        yield fwd, fwd_blink, fwd_ecg, fwd_chpi
+
+
+def _restrict_source_space_to(src, vertices):
+    """Helper to trim down a source space"""
+    assert len(src) == len(vertices)
+    src = deepcopy(src)
+    for s, v in zip(src, vertices):
+        s['inuse'].fill(0)
+        s['nuse'] = len(v)
+        s['vertno'] = v
+        s['inuse'][s['vertno']] = 1
+        del s['pinfo']
+        del s['nuse_tri']
+        del s['use_tris']
+        del s['patch_inds']
+    return src
+
+
+def _interp(data_1, data_2, stc_data, interps):
+    """Helper to interpolate"""
+    out_data = np.dot(data_1, stc_data)
+    if interps is not None:
+        out_data *= interps[0]
+        data_1 = np.dot(data_2, stc_data)
+        data_1 *= interps[1]
+        out_data += data_1
+        del data_1
+    return out_data
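Putting the new module together, a minimal ``simulate_raw`` sketch; the paths
are hypothetical placeholders, and trans/src/bem must belong to the same
subject::

    import mne
    from mne.simulation import simulate_sparse_stc, simulate_raw

    raw = mne.io.Raw('sample_raw.fif', preload=True)
    src = mne.read_source_spaces('sample-oct-6-src.fif')

    # one second of activity; the stc must share the raw sampling rate
    times = raw.times[:int(raw.info['sfreq'])]
    stc = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=0)

    # cov='simple' builds an ad-hoc diagonal noise covariance; blink/ecg
    # add the artifact dipoles described in the docstring above
    raw_sim = simulate_raw(raw, stc, 'sample-trans.fif', src,
                           'sample-bem-sol.fif', cov='simple',
                           blink=True, ecg=True, random_state=0)
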
diff --git a/mne/simulation/source.py b/mne/simulation/source.py
index 9e3cf8d..45293fe 100644
--- a/mne/simulation/source.py
+++ b/mne/simulation/source.py
@@ -6,8 +6,9 @@
 
 import numpy as np
 
-from ..source_estimate import SourceEstimate
-from ..utils import check_random_state
+from ..source_estimate import SourceEstimate, VolSourceEstimate
+from ..source_space import _ensure_src
+from ..utils import check_random_state, deprecated, logger
 from ..externals.six.moves import zip
 
 
@@ -47,6 +48,8 @@ def select_source_in_label(src, label, random_state=None):
     return lh_vertno, rh_vertno
 
 
+ at deprecated('"generate_sparse_stc" is deprecated and will be removed in'
+            'MNE-0.11. Please use simulate_sparse_stc instead')
 def generate_sparse_stc(src, labels, stc_data, tmin, tstep, random_state=None):
     """Generate sparse sources time courses from waveforms and labels
 
@@ -112,6 +115,95 @@ def generate_sparse_stc(src, labels, stc_data, tmin, tstep, random_state=None):
     return stc
 
 
+def simulate_sparse_stc(src, n_dipoles, times,
+                        data_fun=lambda t: 1e-7 * np.sin(20 * np.pi * t),
+                        labels=None, random_state=None):
+    """Generate sparse (n_dipoles) sources time courses from data_fun
+
+    This function randomly selects n_dipoles vertices in the whole cortex
+    or one single vertex in each label if labels is not None. It uses data_fun
+    to generate waveforms for each vertex.
+
+    Parameters
+    ----------
+    src : instance of SourceSpaces
+        The source space.
+    n_dipoles : int
+        Number of dipoles to simulate.
+    times : array
+        Time array
+    data_fun : callable
+        Function to generate the waveforms. The default is a 100 nAm, 10 Hz
+        sinusoid as ``1e-7 * np.sin(20 * pi * t)``. The function should take
+        as input the array of time samples in seconds and return an array of
+        the same length containing the time courses.
+    labels : None | list of Labels
+        The labels. The default is None, otherwise its size must be n_dipoles.
+    random_state : None | int | np.random.RandomState
+        To specify the random generator state.
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    rng = check_random_state(random_state)
+    src = _ensure_src(src, verbose=False)
+    data = np.zeros((n_dipoles, len(times)))
+    for i_dip in range(n_dipoles):
+        data[i_dip, :] = data_fun(times)
+
+    if labels is None:
+        # can be vol or surface source space
+        offsets = np.linspace(0, n_dipoles, len(src) + 1).astype(int)
+        n_dipoles_ss = np.diff(offsets)
+        # don't use .choice b/c not on old numpy
+        vs = [s['vertno'][np.sort(rng.permutation(np.arange(s['nuse']))[:n])]
+              for n, s in zip(n_dipoles_ss, src)]
+        datas = data
+    else:
+        if n_dipoles != len(labels):
+            logger.warning('The number of labels is different from the number '
+                           'of dipoles. %s dipole(s) will be generated.'
+                           % min(n_dipoles, len(labels)))
+        labels = labels[:n_dipoles] if n_dipoles < len(labels) else labels
+
+        vertno = [[], []]
+        lh_data = [np.empty((0, data.shape[1]))]
+        rh_data = [np.empty((0, data.shape[1]))]
+        for i, label in enumerate(labels):
+            lh_vertno, rh_vertno = select_source_in_label(src, label, rng)
+            vertno[0] += lh_vertno
+            vertno[1] += rh_vertno
+            if len(lh_vertno) != 0:
+                lh_data.append(data[i][np.newaxis])
+            elif len(rh_vertno) != 0:
+                rh_data.append(data[i][np.newaxis])
+            else:
+                raise ValueError('No vertno found.')
+        vs = [np.array(v) for v in vertno]
+        datas = [np.concatenate(d) for d in [lh_data, rh_data]]
+        # need to sort each hemi by vertex number
+        for ii in range(2):
+            order = np.argsort(vs[ii])
+            vs[ii] = vs[ii][order]
+            if len(order) > 0:  # fix for old numpy
+                datas[ii] = datas[ii][order]
+        datas = np.concatenate(datas)
+
+    tmin, tstep = times[0], np.diff(times[:2])[0]
+    assert datas.shape == data.shape
+    cls = SourceEstimate if len(vs) == 2 else VolSourceEstimate
+    stc = cls(datas, vertices=vs, tmin=tmin, tstep=tstep)
+    return stc
+
+
+ at deprecated('"generate_stc" is deprecated and will be removed in'
+            'MNE-0.11. Please use simulate_stc instead')
 def generate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
     """Generate sources time courses from waveforms and labels
 
@@ -149,7 +241,46 @@ def generate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
     stc : SourceEstimate
         The generated source time courses.
     """
+    return simulate_stc(src, labels, stc_data, tmin, tstep, value_fun)
+
+
+def simulate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
+    """Simulate sources time courses from waveforms and labels
+
+    This function generates a source estimate with extended sources by
+    filling the labels with the waveforms given in stc_data.
+
+    By default, the vertices within a label are assigned the same waveform.
+    The waveforms can be scaled for each vertex by using the label values
+    and value_fun. E.g.,
 
+    # create a source label where the values are the distance from the center
+    labels = circular_source_labels('sample', 0, 10, 0)
+
+    # sources with decaying strength (x will be the distance from the center)
+    fun = lambda x: exp(- x / 10)
+    stc = simulate_stc(src, labels, stc_data, tmin, tstep, fun)
+
+    Parameters
+    ----------
+    src : list of dict
+        The source space
+    labels : list of Labels
+        The labels
+    stc_data : array (shape: len(labels) x n_times)
+        The waveforms
+    tmin : float
+        The beginning of the timeseries
+    tstep : float
+        The time step (1 / sampling frequency)
+    value_fun : function
+        Function to apply to the label values
+
+    Returns
+    -------
+    stc : SourceEstimate
+        The generated source time courses.
+    """
     if len(labels) != len(stc_data):
         raise ValueError('labels and stc_data must have the same length')
 
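A short sketch of ``simulate_sparse_stc`` with a custom waveform (the source
space path is a hypothetical placeholder)::

    import numpy as np
    from mne import read_source_spaces
    from mne.simulation import simulate_sparse_stc

    src = read_source_spaces('sample-oct-6-src.fif')
    times = np.arange(600) / 1000.

    # 50 nAm, 5 Hz sinusoid instead of the 100 nAm, 10 Hz default
    stc = simulate_sparse_stc(src, n_dipoles=3, times=times,
                              data_fun=lambda t: 5e-8 * np.sin(10 * np.pi * t),
                              random_state=0)
    print(stc.data.shape)  # (3, 600)
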
diff --git a/mne/simulation/tests/test_evoked.py b/mne/simulation/tests/test_evoked.py
index 2f2a348..262a670 100644
--- a/mne/simulation/tests/test_evoked.py
+++ b/mne/simulation/tests/test_evoked.py
@@ -5,23 +5,23 @@
 import os.path as op
 
 import numpy as np
-from numpy.testing import assert_array_almost_equal
+from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_true, assert_raises
 import warnings
 
-from mne.datasets import sample
-from mne import read_label, read_forward_solution
-from mne.time_frequency import morlet
-from mne.simulation import generate_sparse_stc, generate_evoked
+from mne.datasets import testing
+from mne import read_forward_solution
+from mne.simulation import simulate_sparse_stc, simulate_evoked
 from mne import read_cov
 from mne.io import Raw
-from mne import pick_types_evoked, pick_types_forward, read_evokeds
+from mne import pick_types_forward, read_evokeds
+from mne.utils import run_tests_if_main
 
 warnings.simplefilter('always')
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 fwd_fname = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-eeg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
 raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                     'data', 'test_raw.fif')
 ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
@@ -30,7 +30,7 @@ cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                     'data', 'test-cov.fif')
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_simulate_evoked():
     """ Test simulation of evoked data """
 
@@ -38,13 +38,9 @@ def test_simulate_evoked():
     fwd = read_forward_solution(fwd_fname, force_fixed=True)
     fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
     cov = read_cov(cov_fname)
-    label_names = ['Aud-lh', 'Aud-rh']
-    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
-                         '%s.label' % label)) for label in label_names]
 
     evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
-    evoked_template = pick_types_evoked(evoked_template, meg=True, eeg=True,
-                                        exclude=raw.info['bads'])
+    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
 
     snr = 6  # dB
     tmin = -0.1
@@ -53,30 +49,27 @@ def test_simulate_evoked():
     n_samples = 600
     times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
 
-    # Generate times series from 2 Morlet wavelets
-    stc_data = np.zeros((len(labels), len(times)))
-    Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
-    stc_data[0][:len(Ws[0])] = np.real(Ws[0])
-    stc_data[1][:len(Ws[1])] = np.real(Ws[1])
-    stc_data *= 100 * 1e-9  # use nAm as unit
-
-    # time translation
-    stc_data[1] = np.roll(stc_data[1], 80)
-    stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
-                              random_state=0)
+    # Generate times series for 2 dipoles
+    stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)
+    stc._data *= 1e-9
 
     # Generate noisy evoked data
     iir_filter = [1, -0.9]
-    with warnings.catch_warnings(record=True):
-        warnings.simplefilter('always')  # positive semidefinite warning
-        evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
-                                 tmin=0.0, tmax=0.2, iir_filter=iir_filter)
+    evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr,
+                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
     assert_array_almost_equal(evoked.times, stc.times)
     assert_true(len(evoked.data) == len(fwd['sol']['data']))
 
     # make a vertex that doesn't exist in fwd, should throw error
     stc_bad = stc.copy()
     mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
-    stc_bad.vertno[0][0] = mv + 1
-    assert_raises(RuntimeError, generate_evoked, fwd, stc_bad,
-                  evoked_template, cov, snr, tmin=0.0, tmax=0.2)
+    stc_bad.vertices[0][0] = mv + 1
+    assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
+                  evoked_template.info, cov, snr, tmin=0.0, tmax=0.2)
+    evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
+                               tmin=0.0, tmax=0.2)
+    evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
+                               tmin=0.0, tmax=0.2)
+    assert_array_equal(evoked_1.data, evoked_2.data)
+
+run_tests_if_main()
diff --git a/mne/simulation/tests/test_metrics.py b/mne/simulation/tests/test_metrics.py
new file mode 100644
index 0000000..c6915ea
--- /dev/null
+++ b/mne/simulation/tests/test_metrics.py
@@ -0,0 +1,52 @@
+# Author: Yousra Bekhti <yousra.bekhti at gmail.com>
+#         Mark Wronkiewicz <wronk at uw.edu>
+#
+# License: BSD (3-clause)
+
+
+import os.path as op
+
+import numpy as np
+from numpy.testing import assert_almost_equal
+from nose.tools import assert_true, assert_raises
+import warnings
+
+from mne import read_source_spaces
+from mne.datasets import testing
+from mne.simulation import simulate_sparse_stc, source_estimate_quantification
+from mne.utils import run_tests_if_main
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+src_fname = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-oct-6-src.fif')
+
+
+@testing.requires_testing_data
+def test_metrics():
+    """Test simulation metrics"""
+    src = read_source_spaces(src_fname)
+    times = np.arange(600) / 1000.
+    rng = np.random.RandomState(42)
+    stc1 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
+    stc2 = simulate_sparse_stc(src, n_dipoles=2, times=times, random_state=rng)
+    E1_rms = source_estimate_quantification(stc1, stc1, metric='rms')
+    E2_rms = source_estimate_quantification(stc2, stc2, metric='rms')
+    E1_cos = source_estimate_quantification(stc1, stc1, metric='cosine')
+    E2_cos = source_estimate_quantification(stc2, stc2, metric='cosine')
+
+    # ### Tests to add
+    assert_true(E1_rms == 0.)
+    assert_true(E2_rms == 0.)
+    assert_almost_equal(E1_cos, 0.)
+    assert_almost_equal(E2_cos, 0.)
+    stc_bad = stc2.copy().crop(0, 0.5)
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc_bad)
+    stc_bad = stc2.copy()
+    stc_bad.times -= 0.1
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc_bad)
+    assert_raises(ValueError, source_estimate_quantification, stc1, stc2,
+                  metric='foo')
+
+run_tests_if_main()
diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py
new file mode 100644
index 0000000..186ae3e
--- /dev/null
+++ b/mne/simulation/tests/test_raw.py
@@ -0,0 +1,248 @@
+# Authors: Mark Wronkiewicz <wronk at uw.edu>
+#          Yousra Bekhti <yousra.bekhti at gmail.com>
+#          Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import warnings
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+from nose.tools import assert_true, assert_raises
+
+from mne import (read_source_spaces, pick_types, read_trans, read_cov,
+                 make_sphere_model, create_info, setup_volume_source_space)
+from mne.chpi import (_calculate_chpi_positions, get_chpi_positions,
+                      _get_hpi_info)
+from mne.tests.test_chpi import _compare_positions
+from mne.datasets import testing
+from mne.simulation import simulate_sparse_stc, simulate_raw
+from mne.io import Raw, RawArray
+from mne.time_frequency import compute_raw_psd
+from mne.utils import _TempDir, run_tests_if_main, requires_version, slow_test
+from mne.fixes import isclose
+
+
+warnings.simplefilter('always')
+
+data_path = testing.data_path(download=False)
+raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+cov_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-cov.fif')
+trans_fname = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+bem_path = op.join(data_path, 'subjects', 'sample', 'bem')
+src_fname = op.join(bem_path, 'sample-oct-2-src.fif')
+bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif')
+
+raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_subsampled.pos')
+
+
+def _make_stc(raw, src):
+    """Helper to make a STC"""
+    seed = 42
+    sfreq = raw.info['sfreq']  # Hz
+    tstep = 1. / sfreq
+    n_samples = len(raw.times) // 10
+    times = np.arange(0, n_samples) * tstep
+    stc = simulate_sparse_stc(src, 10, times, random_state=seed)
+    return stc
+
+
+def _get_data():
+    """Helper to get some starting data"""
+    # raw with ECG channel
+    raw = Raw(raw_fname).crop(0., 5.0).load_data()
+    data_picks = pick_types(raw.info, meg=True, eeg=True)
+    other_picks = pick_types(raw.info, meg=False, stim=True, eog=True)
+    picks = np.sort(np.concatenate((data_picks[::16], other_picks)))
+    raw = raw.pick_channels([raw.ch_names[p] for p in picks])
+    ecg = RawArray(np.zeros((1, len(raw.times))),
+                   create_info(['ECG 063'], raw.info['sfreq'], 'ecg'))
+    for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass',
+                'filename', 'dig'):
+        ecg.info[key] = raw.info[key]
+    raw.add_channels([ecg])
+
+    src = read_source_spaces(src_fname)
+    trans = read_trans(trans_fname)
+    sphere = make_sphere_model('auto', 'auto', raw.info)
+    stc = _make_stc(raw, src)
+    return raw, src, stc, trans, sphere
+
+
+ at testing.requires_testing_data
+def test_simulate_raw_sphere():
+    """Test simulation of raw data with sphere model"""
+    seed = 42
+    raw, src, stc, trans, sphere = _get_data()
+    assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)
+
+    # head pos
+    head_pos_sim = dict()
+    # these will be at 1., 2., ... sec
+    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
+
+    for time_key, shift in enumerate(shifts):
+        # Create 4x4 matrix transform and normalize
+        temp_trans = deepcopy(raw.info['dev_head_t'])
+        temp_trans['trans'][:3, 3] += shift
+        head_pos_sim[time_key + 1.] = temp_trans['trans']
+
+    #
+    # Test raw simulation with basic parameters
+    #
+    raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
+                           head_pos=head_pos_sim,
+                           blink=True, ecg=True, random_state=seed)
+    raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
+                             cov_fname, head_pos=head_pos_sim,
+                             blink=True, ecg=True, random_state=seed)
+    assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
+    # Test IO on processed data
+    tempdir = _TempDir()
+    test_outname = op.join(tempdir, 'sim_test_raw.fif')
+    raw_sim.save(test_outname)
+
+    raw_sim_loaded = Raw(test_outname, preload=True, proj=False,
+                         allow_maxshield=True)
+    assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
+    del raw_sim, raw_sim_2
+    # with no cov (no noise) but with artifacts, most time periods should match
+    # but the EOG/ECG channels should not
+    for ecg, eog in ((True, False), (False, True), (True, True)):
+        raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
+                                 cov=None, head_pos=head_pos_sim,
+                                 blink=eog, ecg=ecg, random_state=seed)
+        raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
+                                 cov=None, head_pos=head_pos_sim,
+                                 blink=False, ecg=False, random_state=seed)
+        picks = np.arange(len(raw.ch_names))
+        diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
+        these_picks = np.setdiff1d(picks, diff_picks)
+        close = isclose(raw_sim_3[these_picks][0],
+                        raw_sim_4[these_picks][0], atol=1e-20)
+        assert_true(np.mean(close) > 0.7)
+        far = ~isclose(raw_sim_3[diff_picks][0],
+                       raw_sim_4[diff_picks][0], atol=1e-20)
+        assert_true(np.mean(far) > 0.99)
+    del raw_sim_3, raw_sim_4
+
+    # make sure it works with EEG-only and MEG-only
+    raw_sim_meg = simulate_raw(raw.pick_types(meg=True, eeg=False, copy=True),
+                               stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_eeg = simulate_raw(raw.pick_types(meg=False, eeg=True, copy=True),
+                               stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_meeg = simulate_raw(raw.pick_types(meg=True, eeg=True, copy=True),
+                                stc, trans, src, sphere, cov=None,
+                                ecg=True, blink=True, random_state=seed)
+    assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
+                    raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
+    del raw_sim_meg, raw_sim_eeg, raw_sim_meeg
+
+    # check that different interpolations are similar given small movements
+    raw_sim_cos = simulate_raw(raw, stc, trans, src, sphere,
+                               head_pos=head_pos_sim,
+                               random_state=seed)
+    raw_sim_lin = simulate_raw(raw, stc, trans, src, sphere,
+                               head_pos=head_pos_sim, interp='linear',
+                               random_state=seed)
+    assert_allclose(raw_sim_cos[:][0], raw_sim_lin[:][0],
+                    rtol=1e-5, atol=1e-20)
+    del raw_sim_cos, raw_sim_lin
+
+    # Make impossible transform (translate up into helmet) and ensure failure
+    head_pos_sim_err = deepcopy(head_pos_sim)
+    head_pos_sim_err[1.][2, 3] -= 0.1  # z trans upward 10cm
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  ecg=False, blink=False, head_pos=head_pos_sim_err)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
+                  bem_fname, ecg=False, blink=False,
+                  head_pos=head_pos_sim_err)
+    # other degenerate conditions
+    assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
+    assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
+    assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
+                  trans, src, sphere)
+    stc_bad = stc.copy()
+    stc_bad.tstep += 0.1
+    assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  chpi=True)  # no cHPI info
+    assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
+                  interp='foo')
+    assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=1.)
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=pos_fname)  # ends up with t>t_end
+    head_pos_sim_err = deepcopy(head_pos_sim)
+    head_pos_sim_err[-1.] = head_pos_sim_err[1.]  # negative time
+    assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
+                  head_pos=head_pos_sim_err)
+
+
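
Editor's note: the no-noise comparisons above count the fraction of samples that (mis)match between two simulations. A toy version of that bookkeeping with plain NumPy (mne.fixes.isclose appears to be a backport of numpy.isclose for older NumPy versions):

    import numpy as np

    rng = np.random.RandomState(0)
    sim_a = rng.randn(5, 1000)
    sim_b = sim_a.copy()
    sim_b[:, ::10] += 1.  # perturb every 10th sample

    close = np.isclose(sim_a, sim_b, atol=1e-20)
    print(np.mean(close))  # fraction of matching samples: 0.9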
+ at testing.requires_testing_data
+def test_simulate_raw_bem():
+    """Test simulation of raw data with BEM"""
+    seed = 42
+    raw, src, stc, trans, sphere = _get_data()
+    raw_sim_sph = simulate_raw(raw, stc, trans, src, sphere, cov=None,
+                               ecg=True, blink=True, random_state=seed)
+    raw_sim_bem = simulate_raw(raw, stc, trans, src, bem_fname, cov=None,
+                               ecg=True, blink=True, random_state=seed,
+                               n_jobs=2)
+    # some components (especially radial) might not match that well,
+    # so just make sure that most components have high correlation
+    assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
+    picks = pick_types(raw.info, meg=True, eeg=True)
+    n_ch = len(picks)
+    corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
+    assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
+    assert_true(np.median(np.diag(corr[:n_ch, -n_ch:])) > 0.9)
+
+
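
Editor's note: the sphere-vs-BEM check stacks both channel matrices into one call to np.corrcoef and reads the per-channel correlations off the diagonal of the cross block; a small stand-alone illustration:

    import numpy as np

    rng = np.random.RandomState(0)
    n_ch, n_times = 4, 500
    sph = rng.randn(n_ch, n_times)
    bem = sph + 0.1 * rng.randn(n_ch, n_times)   # similar, not identical

    corr = np.corrcoef(sph, bem)                 # shape (2 * n_ch, 2 * n_ch)
    cross = np.diag(corr[:n_ch, -n_ch:])         # channel-wise sph-vs-bem corr
    print(np.median(cross))                      # close to 1 for similar data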
+ at slow_test
+ at requires_version('numpy', '1.7')
+ at requires_version('scipy', '0.12')
+ at testing.requires_testing_data
+def test_simulate_raw_chpi():
+    """Test simulation of raw data with cHPI"""
+    with warnings.catch_warnings(record=True):  # MaxShield
+        raw = Raw(raw_chpi_fname, allow_maxshield=True)
+    sphere = make_sphere_model('auto', 'auto', raw.info)
+    # make sparse spherical source space
+    sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
+    src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
+    stc = _make_stc(raw, src)
+    # simulate data with cHPI on
+    raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False)
+    # need to trim extra samples off this one
+    raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
+                            head_pos=pos_fname)
+    # test cHPI indication
+    hpi_freqs, _, hpi_pick, hpi_on, _ = _get_hpi_info(raw.info)
+    assert_allclose(raw_sim[hpi_pick][0], 0.)
+    assert_allclose(raw_chpi[hpi_pick][0], hpi_on)
+    # test that the cHPI signals make some reasonable values
+    psd_sim, freqs_sim = compute_raw_psd(raw_sim)
+    psd_chpi, freqs_chpi = compute_raw_psd(raw_chpi)
+    assert_array_equal(freqs_sim, freqs_chpi)
+    freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f)) for f in hpi_freqs])
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
+    assert_allclose(psd_sim[picks_eeg], psd_chpi[picks_eeg], atol=1e-20)
+    assert_true((psd_chpi[picks_meg][:, freq_idx] >
+                 100 * psd_sim[picks_meg][:, freq_idx]).all())
+    # test localization based on cHPI information
+    trans_sim, rot_sim, t_sim = _calculate_chpi_positions(raw_chpi)
+    trans, rot, t = get_chpi_positions(pos_fname)
+    t -= raw.first_samp / raw.info['sfreq']
+    _compare_positions((trans, rot, t), (trans_sim, rot_sim, t_sim),
+                       max_dist=0.005)
+
+run_tests_if_main()
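
Editor's note: as the tests above show, simulate_raw accepts head_pos either as a .pos file name or as a dict mapping a time in seconds to a 4 x 4 device-to-head transform. A minimal sketch of building such a dict, mirroring the loop in test_simulate_raw_sphere (the identity matrix is a stand-in for raw.info['dev_head_t']['trans']):

    import numpy as np
    from copy import deepcopy

    dev_head_t = np.eye(4)  # stand-in for raw.info['dev_head_t']['trans']
    shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]  # metres

    head_pos = dict()
    for time_key, shift in enumerate(shifts):
        trans = deepcopy(dev_head_t)
        trans[:3, 3] += shift                # translate the head
        head_pos[time_key + 1.] = trans      # applies at 1., 2., ... sec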
diff --git a/mne/simulation/tests/test_source.py b/mne/simulation/tests/test_source.py
index e26da75..ee6eb84 100644
--- a/mne/simulation/tests/test_source.py
+++ b/mne/simulation/tests/test_source.py
@@ -4,24 +4,31 @@ import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_true
 
-from mne.datasets import sample
-from mne import read_label, read_forward_solution
+from mne.datasets import testing
+from mne import read_label, read_forward_solution, pick_types_forward
 from mne.label import Label
-from mne.simulation.source import generate_stc, generate_sparse_stc
+from mne.simulation.source import simulate_stc, simulate_sparse_stc
+from mne.utils import run_tests_if_main
 
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 fname_fwd = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
 label_names = ['Aud-lh', 'Aud-rh', 'Vis-rh']
 
 label_names_single_hemi = ['Aud-rh', 'Vis-rh']
 
 
- at sample.requires_sample_data
-def test_generate_stc():
+def read_forward_solution_meg(*args, **kwargs):
+    fwd = read_forward_solution(*args, **kwargs)
+    fwd = pick_types_forward(fwd, meg=True, eeg=False)
+    return fwd
+
+
+ at testing.requires_testing_data
+def test_simulate_stc():
     """ Test generation of source estimate """
-    fwd = read_forward_solution(fname_fwd, force_fixed=True)
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
     labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                          '%s.label' % label)) for label in label_names]
     mylabels = []
@@ -38,7 +45,7 @@ def test_generate_stc():
     tstep = 1e-3
 
     stc_data = np.ones((len(labels), n_times))
-    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
 
     for label in labels:
         if label.hemi == 'lh':
@@ -46,18 +53,19 @@ def test_generate_stc():
         else:
             hemi_idx = 1
 
-        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
 
         if hemi_idx == 1:
-            idx += len(stc.vertno[0])
+            idx += len(stc.vertices[0])
 
         assert_true(np.all(stc.data[idx] == 1.0))
         assert_true(stc.data[idx].shape[1] == n_times)
 
     # test with function
-    fun = lambda x: x ** 2
-    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+    def fun(x):
+        return x ** 2
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
 
     # the first label has value 0, the second value 2, the third value 6
 
@@ -67,59 +75,46 @@ def test_generate_stc():
         else:
             hemi_idx = 1
 
-        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
 
         if hemi_idx == 1:
-            idx += len(stc.vertno[0])
+            idx += len(stc.vertices[0])
 
         res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
         assert_array_almost_equal(stc.data[idx], res)
 
 
- at sample.requires_sample_data
-def test_generate_sparse_stc():
+ at testing.requires_testing_data
+def test_simulate_sparse_stc():
     """ Test generation of sparse source estimate """
-    fwd = read_forward_solution(fname_fwd, force_fixed=True)
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
     labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                          '%s.label' % label)) for label in label_names]
 
     n_times = 10
     tmin = 0
     tstep = 1e-3
+    times = np.arange(n_times, dtype=np.float) * tstep + tmin
 
-    stc_data = (np.ones((len(labels), n_times))
-                * np.arange(len(labels))[:, None])
-    stc_1 = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep, 0)
-
-    for i, label in enumerate(labels):
-        if label.hemi == 'lh':
-            hemi_idx = 0
-        else:
-            hemi_idx = 1
-
-        idx = np.intersect1d(stc_1.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc_1.vertno[hemi_idx], idx)
-
-        if hemi_idx == 1:
-            idx += len(stc_1.vertno[0])
-
-        assert_true(np.all(stc_1.data[idx] == float(i)))
+    stc_1 = simulate_sparse_stc(fwd['src'], len(labels), times,
+                                labels=labels, random_state=0)
 
     assert_true(stc_1.data.shape[0] == len(labels))
     assert_true(stc_1.data.shape[1] == n_times)
 
     # make sure we get the same result when using the same seed
-    stc_2 = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep, 0)
+    stc_2 = simulate_sparse_stc(fwd['src'], len(labels), times,
+                                labels=labels, random_state=0)
 
     assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
     assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
 
 
- at sample.requires_sample_data
+ at testing.requires_testing_data
 def test_generate_stc_single_hemi():
-    """ Test generation of source estimate """
-    fwd = read_forward_solution(fname_fwd, force_fixed=True)
+    """ Test generation of source estimate, single hemi """
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
     labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
                                              'labels', '%s.label' % label))
                           for label in label_names_single_hemi]
@@ -137,7 +132,7 @@ def test_generate_stc_single_hemi():
     tstep = 1e-3
 
     stc_data = np.ones((len(labels_single_hemi), n_times))
-    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
 
     for label in labels_single_hemi:
         if label.hemi == 'lh':
@@ -145,18 +140,19 @@ def test_generate_stc_single_hemi():
         else:
             hemi_idx = 1
 
-        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
 
         if hemi_idx == 1:
-            idx += len(stc.vertno[0])
+            idx += len(stc.vertices[0])
 
         assert_true(np.all(stc.data[idx] == 1.0))
         assert_true(stc.data[idx].shape[1] == n_times)
 
     # test with function
-    fun = lambda x: x ** 2
-    stc = generate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
+    def fun(x):
+        return x ** 2
+    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
 
     # the first label has value 0, the second value 2, the third value 6
 
@@ -166,52 +162,40 @@ def test_generate_stc_single_hemi():
         else:
             hemi_idx = 1
 
-        idx = np.intersect1d(stc.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc.vertno[hemi_idx], idx)
+        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
+        idx = np.searchsorted(stc.vertices[hemi_idx], idx)
 
         if hemi_idx == 1:
-            idx += len(stc.vertno[0])
+            idx += len(stc.vertices[0])
 
         res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
         assert_array_almost_equal(stc.data[idx], res)
 
 
- at sample.requires_sample_data
-def test_generate_sparse_stc_single_hemi():
+ at testing.requires_testing_data
+def test_simulate_sparse_stc_single_hemi():
     """ Test generation of sparse source estimate """
-    fwd = read_forward_solution(fname_fwd, force_fixed=True)
+    fwd = read_forward_solution_meg(fname_fwd, force_fixed=True)
     n_times = 10
     tmin = 0
     tstep = 1e-3
+    times = np.arange(n_times, dtype=np.float) * tstep + tmin
+
     labels_single_hemi = [read_label(op.join(data_path, 'MEG', 'sample',
                                              'labels', '%s.label' % label))
                           for label in label_names_single_hemi]
 
-    stc_data = (np.ones((len(labels_single_hemi), n_times))
-                * np.arange(len(labels_single_hemi))[:, None])
-    stc_1 = generate_sparse_stc(fwd['src'], labels_single_hemi, stc_data,
-                                tmin, tstep, 0)
-
-    for i, label in enumerate(labels_single_hemi):
-        if label.hemi == 'lh':
-            hemi_idx = 0
-        else:
-            hemi_idx = 1
-
-        idx = np.intersect1d(stc_1.vertno[hemi_idx], label.vertices)
-        idx = np.searchsorted(stc_1.vertno[hemi_idx], idx)
-
-        if hemi_idx == 1:
-            idx += len(stc_1.vertno[0])
-
-        assert_true(np.all(stc_1.data[idx] == float(i)))
+    stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
+                                labels=labels_single_hemi, random_state=0)
 
     assert_true(stc_1.data.shape[0] == len(labels_single_hemi))
     assert_true(stc_1.data.shape[1] == n_times)
 
     # make sure we get the same result when using the same seed
-    stc_2 = generate_sparse_stc(fwd['src'], labels_single_hemi, stc_data,
-                                tmin, tstep, 0)
+    stc_2 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
+                                labels=labels_single_hemi, random_state=0)
 
     assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
     assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
+
+run_tests_if_main()
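
Editor's note: the rewritten tests pass an explicit times vector and a random_state instead of per-label data arrays. simulate_sparse_stc itself needs a source space, so only the reproducibility argument is sketched here:

    import numpy as np

    n_times, tmin, tstep = 10, 0, 1e-3
    times = np.arange(n_times, dtype=float) * tstep + tmin

    # the same seed yields the same draws, which is why stc_1 equals stc_2
    draw_1 = np.random.RandomState(0).randn(len(times))
    draw_2 = np.random.RandomState(0).randn(len(times))
    assert np.array_equal(draw_1, draw_2)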
diff --git a/mne/source_estimate.py b/mne/source_estimate.py
index 76936cd..7c20c71 100644
--- a/mne/source_estimate.py
+++ b/mne/source_estimate.py
@@ -5,27 +5,30 @@
 #
 # License: BSD (3-clause)
 
-from .externals.six import string_types
 import os
 import copy
 from math import ceil
+import warnings
+
 import numpy as np
 from scipy import linalg, sparse
-from scipy.sparse import csr_matrix, coo_matrix
-import warnings
+from scipy.sparse import coo_matrix
 
-from ._hdf5 import read_hdf5, write_hdf5
 from .filter import resample
 from .evoked import _get_peak
 from .parallel import parallel_func
 from .surface import (read_surface, _get_ico_surface, read_morph_map,
-                      _compute_nearest)
-from .utils import (get_subjects_dir, _check_subject,
-                    _check_pandas_index_arguments, _check_pandas_installed,
-                    logger, verbose)
+                      _compute_nearest, mesh_edges)
+from .source_space import (_ensure_src, _get_morph_src_reordering,
+                           _ensure_src_subject)
+from .utils import (get_subjects_dir, _check_subject, logger, verbose,
+                    _time_mask)
 from .viz import plot_source_estimates
-from .fixes import in1d
+from .fixes import in1d, sparse_block_diag
+from .io.base import ToDataFrameMixin
 from .externals.six.moves import zip
+from .externals.six import string_types
+from .externals.h5io import read_hdf5, write_hdf5
 
 
 def _read_stc(filename):
@@ -201,7 +204,7 @@ def _write_w(filename, vertices, data):
     # write the vertices and data
     for i in range(vertices_n):
         _write_3(fid, vertices[i])
-        #XXX: without float() endianness is wrong, not sure why
+        # XXX: without float() endianness is wrong, not sure why
         fid.write(np.array(float(data[i]), dtype='>f4').tostring())
 
     # close the file
@@ -319,12 +322,12 @@ def read_source_estimate(fname, subject=None):
         kwargs['tmin'] = 0.0
         kwargs['tstep'] = 1.0
     elif ftype == 'h5':
-        kwargs = read_hdf5(fname + '-stc.h5')
+        kwargs = read_hdf5(fname + '-stc.h5', title='mnepython')
 
     if ftype != 'volume':
         # Make sure the vertices are ordered
         vertices = kwargs['vertices']
-        if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]):
+        if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
             sidx = [np.argsort(verts) for verts in vertices]
             vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
             data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
@@ -347,7 +350,7 @@ def read_source_estimate(fname, subject=None):
 
 
 def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
-    """Helper function to generate either a surface or volume source estimate
+    """Helper function to generate a surface, volume or mixed source estimate
     """
 
     if isinstance(vertices, list) and len(vertices) == 2:
@@ -358,8 +361,12 @@ def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
             and len(vertices) == 1:
         stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,
                                 tstep=tstep, subject=subject)
+    elif isinstance(vertices, list) and len(vertices) > 2:
+        # make a mixed source estimate
+        stc = MixedSourceEstimate(data, vertices=vertices, tmin=tmin,
+                                  tstep=tstep, subject=subject)
     else:
-        raise ValueError('vertices has to be either a list with one or two '
+        raise ValueError('vertices has to be either a list with one or more '
                          'arrays or an array')
     return stc
 
@@ -367,8 +374,9 @@ def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
 def _verify_source_estimate_compat(a, b):
     """Make sure two SourceEstimates are compatible for arith. operations"""
     compat = False
-    if len(a.vertno) == len(b.vertno):
-        if all([np.array_equal(av, vv) for av, vv in zip(a.vertno, b.vertno)]):
+    if len(a.vertices) == len(b.vertices):
+        if all(np.array_equal(av, vv)
+               for av, vv in zip(a.vertices, b.vertices)):
             compat = True
     if not compat:
         raise ValueError('Cannot combine SourceEstimates that do not have the '
@@ -378,7 +386,7 @@ def _verify_source_estimate_compat(a, b):
                          'names, %r and %r' % (a.subject, b.subject))
 
 
-class _BaseSourceEstimate(object):
+class _BaseSourceEstimate(ToDataFrameMixin, object):
     """Abstract base class for source estimates
 
     Parameters
@@ -390,9 +398,9 @@ class _BaseSourceEstimate(object):
         space data corresponds to "numpy.dot(kernel, sens_data)".
     vertices : array | list of two arrays
         Vertex numbers corresponding to the data.
-    tmin : scalar
+    tmin : float
         Time point of the first sample in data.
-    tstep : scalar
+    tstep : float
         Time step between successive samples in data.
     subject : str | None
         The subject name. While not necessary, it is safer to set the
@@ -428,12 +436,11 @@ class _BaseSourceEstimate(object):
                                  'dimensions')
 
         if isinstance(vertices, list):
-            if not (len(vertices) == 2 or len(vertices) == 1) or \
-                    not all([isinstance(v, np.ndarray) for v in vertices]):
-                raise ValueError('Vertices, if a list, must contain one or '
-                                 'two numpy arrays')
+            if not all(isinstance(v, np.ndarray) for v in vertices):
+                raise ValueError('Vertices, if a list, must contain numpy '
+                                 'arrays')
 
-            if any([np.any(np.diff(v.astype(int)) <= 0) for v in vertices]):
+            if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
                 raise ValueError('Vertices must be ordered in increasing '
                                  'order.')
 
@@ -454,7 +461,7 @@ class _BaseSourceEstimate(object):
         self._data = data
         self.tmin = tmin
         self.tstep = tstep
-        self.vertno = vertices
+        self.vertices = vertices
         self.verbose = verbose
         self._kernel = kernel
         self._sens_data = sens_data
@@ -477,18 +484,13 @@ class _BaseSourceEstimate(object):
 
         Parameters
         ----------
-        tmin : float or None
+        tmin : float | None
             The first time point in seconds. If None, the first available time point is used.
-        tmax : float or None
+        tmax : float | None
             The last time point in seconds. If None, the last available time point is used.
         """
-        mask = np.ones(len(self.times), dtype=np.bool)
-        if tmax is not None:
-            mask = mask & (self.times <= tmax)
-        if tmin is not None:
-            mask = mask & (self.times >= tmin)
-            self.tmin = tmin
-
+        mask = _time_mask(self.times, tmin, tmax)
+        self.tmin = self.times[np.where(mask)[0][0]]
         if self._kernel is not None and self._sens_data is not None:
             self._sens_data = self._sens_data[:, mask]
         else:
@@ -536,6 +538,7 @@ class _BaseSourceEstimate(object):
 
     @property
     def data(self):
+        """Numpy array of source estimate data"""
         if self._data is None:
             # compute the solution the first time the data is accessed and
             # remove the kernel and sensor data
@@ -544,6 +547,7 @@ class _BaseSourceEstimate(object):
 
     @property
     def shape(self):
+        """Shape of the data"""
         if self._data is not None:
             return self._data.shape
         return (self._kernel.shape[0], self._sens_data.shape[1])
@@ -572,14 +576,14 @@ class _BaseSourceEstimate(object):
         Returns
         -------
         stc : instance of SourceEstimate
-            The modified stc (note: method operates inplace).
+            The modified stc (method operates inplace).
         """
         data = self.data
         tmax = self.tmin + self.tstep * data.shape[1]
         tmin = (self.tmin + tmax) / 2.
         tstep = tmax - self.tmin
         mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis],
-                                  vertices=self.vertno, tmin=tmin,
+                                  vertices=self.vertices, tmin=tmin,
                                   tstep=tstep, subject=self.subject)
         return mean_stc
 
@@ -663,7 +667,13 @@ class _BaseSourceEstimate(object):
         return self
 
     def sqrt(self):
-        """Return copy of SourceEstimate with sqrt(data)."""
+        """Take the square root
+
+        Returns
+        -------
+        stc : instance of SourceEstimate
+            A copy of the SourceEstimate with sqrt(data).
+        """
         return self ** (0.5)
 
     def copy(self):
@@ -681,9 +691,6 @@ class _BaseSourceEstimate(object):
         ----------
         width : scalar
             Width of the individual bins in seconds.
-        func : callable
-            Function that is applied to summarize the data. Needs to accept a
-            numpy.array as first input and an ``axis`` keyword argument.
         tstart : scalar | None
             Time point where the first bin starts. The default is the first
             time point of the stc.
@@ -691,6 +698,9 @@ class _BaseSourceEstimate(object):
             Last possible time point contained in a bin (if the last bin would
             be shorter than width it is dropped). The default is the last time
             point of the stc.
+        func : callable
+            Function that is applied to summarize the data. Needs to accept a
+            numpy.array as first input and an ``axis`` keyword argument.
 
         Returns
         -------
@@ -711,7 +721,7 @@ class _BaseSourceEstimate(object):
             data[:, i] = func(self.data[:, idx], axis=1)
 
         tmin = times[0] + width / 2.
-        stc = _make_stc(data, vertices=self.vertno,
+        stc = _make_stc(data, vertices=self.vertices,
                         tmin=tmin, tstep=width, subject=self.subject)
         return stc
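
Editor's note: the binning that bin() performs reduces to the following NumPy sketch (mean used as the summary func; the real method also lets you choose tstart and tstop):

    import numpy as np

    rng = np.random.RandomState(0)
    times = np.arange(1000) / 1000.      # 1 s sampled at 1 kHz
    data = rng.randn(3, times.size)      # (n_vertices, n_times)

    width = 0.1                          # bin width in seconds
    starts = np.arange(times[0], times[-1], width)
    binned = np.array([data[:, (times >= t0) &
                            (times < t0 + width)].mean(axis=1)
                       for t0 in starts]).T
    bin_centers = starts + width / 2.    # matches tmin = times[0] + width / 2.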
 
@@ -725,7 +735,7 @@ class _BaseSourceEstimate(object):
         ----------
         func : callable
             The transform to be applied, including parameters (see, e.g.,
-            mne.fixes.partial). The first parameter of the function is the
+            `mne.fixes.partial`). The first parameter of the function is the
             input data. The first return value is the transformed data,
             remaining outputs are ignored. The first dimension of the
             transformed data has to be the same as the first dimension of the
@@ -745,12 +755,13 @@ class _BaseSourceEstimate(object):
         data_t : ndarray
             The transformed data.
 
-        .. note::
-            Applying transforms can be significantly faster if the
-            SourceEstimate object was created using "(kernel, sens_data)", for
-            the "data" parameter as the transform is applied in sensor space.
-            Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
-            this automatically (if possible).
+        Notes
+        -----
+        Applying transforms can be significantly faster if the
+        SourceEstimate object was created using "(kernel, sens_data)" for
+        the "data" parameter, as the transform is then applied in sensor
+        space.
+        Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
+        this automatically (if possible).
         """
 
         if idx is None:
@@ -841,18 +852,16 @@ class _BaseSourceEstimate(object):
 
         # min and max data indices to include
         times = np.round(1000 * self.times)
-
+        t_idx = np.where(_time_mask(times, tmin, tmax))[0]
         if tmin is None:
             tmin_idx = None
         else:
-            tmin = float(tmin)
-            tmin_idx = np.where(times >= tmin)[0][0]
+            tmin_idx = t_idx[0]
 
         if tmax is None:
             tmax_idx = None
         else:
-            tmax = float(tmax)
-            tmax_idx = np.where(times <= tmax)[0][-1]
+            tmax_idx = t_idx[-1]
 
         data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
                                      tmax_idx=tmax_idx)
@@ -888,76 +897,11 @@ class _BaseSourceEstimate(object):
         else:
             # return new or overwritten stc
             stcs = self if not copy else self.copy()
-            stcs._data, stcs.vertno = data_t, verts
+            stcs._data, stcs.vertices = data_t, verts
             stcs.tmin, stcs.times = tmin, times
 
         return stcs
 
-    def as_data_frame(self, index=None, scale_time=1e3, copy=True):
-        """Represent source estimates as Pandas DataFrame
-
-        Export source estimates in tabular structure with vertices as columns
-        and two additional info columns 'subject' and 'time'.
-        This function is useful to visualize and analyse source time courses
-        with external statistical software such as statsmodels or R.
-
-        Parameters
-        ----------
-        index : tuple of str | None
-            Column to be used as index for the data. Valid string options
-            are 'subject' and 'time'. If None, both info
-            columns will be included in the table as categorial data.
-            If stc.subject is None, only time will be included.
-        scale_time : float
-            Scaling to be applied to time units.
-        copy : bool
-            If true, data will be copied. Else data may be modified in place.
-
-        Returns
-        -------
-        df : instance of DataFrame
-            Source estimates exported into tabular data structure.
-        """
-        pd = _check_pandas_installed()
-
-        default_index = ['subject', 'time']
-        if index is not None:
-            _check_pandas_index_arguments(index, default_index)
-        else:
-            index = default_index
-        if self.subject is None:
-            index.remove('subject')
-
-        data = self.data.T
-        shape = data.shape
-        mindex = list()
-        mindex.append(('time', self.times * scale_time))
-        mindex.append(('subject', np.repeat(self.subject, shape[0])))
-
-        if copy:
-            data = data.copy()
-        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
-
-        if isinstance(self.vertno, list):
-            # surface source estimates
-            v_names = [i for e in [['%s %i' % ('LH' if ii < 1 else 'RH', vert)
-                       for vert in vertno]
-                       for ii, vertno in enumerate(self.vertno)] for i in e]
-        else:
-            # volume source estimates
-            v_names = ['VOL %d' % vert for vert in self.vertno]
-
-        df = pd.DataFrame(data, columns=v_names)
-        [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]
-
-        if index is not None:
-            if 'time' in index:
-                df['time'] = df['time'].astype(np.int64)
-            with warnings.catch_warnings(record=True):
-                df.set_index(index, inplace=True)
-
-        return df
-
 
 class SourceEstimate(_BaseSourceEstimate):
     """Container for surface source estimates
@@ -987,7 +931,7 @@ class SourceEstimate(_BaseSourceEstimate):
         The subject name.
     times : array of shape (n_times,)
         The time vector.
-    vertno : list of two arrays of shape (n_dipoles,)
+    vertices : list of two arrays of shape (n_dipoles,)
         The indices of the dipoles in the left and right source space.
     data : array of shape (n_dipoles, n_times)
         The data in source space.
@@ -1048,16 +992,16 @@ class SourceEstimate(_BaseSourceEstimate):
                      data=rh_data[:, 0])
         elif ftype == 'h5':
             write_hdf5(fname + '-stc.h5',
-                       dict(vertices=self.vertno, data=self.data,
+                       dict(vertices=self.vertices, data=self.data,
                             tmin=self.tmin, tstep=self.tstep,
-                            subject=self.subject))
+                            subject=self.subject), title='mnepython')
         logger.info('[done]')
 
     def __repr__(self):
-        if isinstance(self.vertno, list):
-            nv = sum([len(v) for v in self.vertno])
+        if isinstance(self.vertices, list):
+            nv = sum([len(v) for v in self.vertices])
         else:
-            nv = self.vertno.size
+            nv = self.vertices.size
         s = "%d vertices" % nv
         if self.subject is not None:
             s += ", subject : %s" % self.subject
@@ -1077,18 +1021,18 @@ class SourceEstimate(_BaseSourceEstimate):
 
     @property
     def lh_vertno(self):
-        return self.vertno[0]
+        return self.vertices[0]
 
     @property
     def rh_vertno(self):
-        return self.vertno[1]
+        return self.vertices[1]
 
     def _hemilabel_stc(self, label):
 
         if label.hemi == 'lh':
-            stc_vertices = self.vertno[0]
+            stc_vertices = self.vertices[0]
         else:
-            stc_vertices = self.vertno[1]
+            stc_vertices = self.vertices[1]
 
         # find index of the Label's vertices
         idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
@@ -1098,7 +1042,7 @@ class SourceEstimate(_BaseSourceEstimate):
 
         # find data
         if label.hemi == 'rh':
-            values = self.data[idx + len(self.vertno[0])]
+            values = self.data[idx + len(self.vertices[0])]
         else:
             values = self.data[idx]
 
@@ -1131,10 +1075,10 @@ class SourceEstimate(_BaseSourceEstimate):
             values = np.vstack((lh_val, rh_val))
         elif label.hemi == 'lh':
             lh_vert, values = self._hemilabel_stc(label)
-            vertices = [lh_vert, np.array([])]
+            vertices = [lh_vert, np.array([], int)]
         elif label.hemi == 'rh':
             rh_vert, values = self._hemilabel_stc(label)
-            vertices = [np.array([]), rh_vert]
+            vertices = [np.array([], int), rh_vert]
         else:
             raise TypeError("Expected  Label or BiHemiLabel; got %r" % label)
 
@@ -1146,15 +1090,15 @@ class SourceEstimate(_BaseSourceEstimate):
                                    subject=self.subject)
         return label_stc
 
-    def expand(self, vertno):
+    def expand(self, vertices):
         """Expand SourceEstimate to include more vertices
 
-        This will add rows to stc.data (zero-filled) and modify stc.vertno
-        to include all vertices in stc.vertno and the input vertno.
+        This will add rows to stc.data (zero-filled) and modify stc.vertices
+        to include all vertices in stc.vertices and the input vertices.
 
         Parameters
         ----------
-        vertno : list of array
+        vertices : list of array
             New vertices to add. Can also contain old values.
 
         Returns
@@ -1162,23 +1106,24 @@ class SourceEstimate(_BaseSourceEstimate):
         stc : instance of SourceEstimate
             The modified stc (note: method operates inplace).
         """
-        if not isinstance(vertno, list):
-            raise TypeError('vertno must be a list')
-        if not len(self.vertno) == len(vertno):
-            raise ValueError('vertno must have the same length as stc.vertno')
+        if not isinstance(vertices, list):
+            raise TypeError('vertices must be a list')
+        if not len(self.vertices) == len(vertices):
+            raise ValueError('vertices must have the same length as '
+                             'stc.vertices')
 
         # can no longer use kernel and sensor data
         self._remove_kernel_sens_data_()
 
         inserters = list()
         offsets = [0]
-        for vi, (v_old, v_new) in enumerate(zip(self.vertno, vertno)):
+        for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
             v_new = np.setdiff1d(v_new, v_old)
             inds = np.searchsorted(v_old, v_new)
             # newer numpy might overwrite inds after np.insert, copy here
             inserters += [inds.copy()]
             offsets += [len(v_old)]
-            self.vertno[vi] = np.insert(v_old, inds, v_new)
+            self.vertices[vi] = np.insert(v_old, inds, v_new)
         inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
         inds = np.concatenate(inds)
         new_data = np.zeros((len(inds), self._data.shape[1]))
@@ -1194,21 +1139,22 @@ class SourceEstimate(_BaseSourceEstimate):
         time courses are extracted depends on the mode parameter.
 
         Valid values for mode are:
-        'mean': Average within each label.
-        'mean_flip': Average within each label with sign flip depending on
-        source orientation.
-        'pca_flip': Apply an SVD to the time courses within each label and use
-        the scaled and sign-flipped first right-singular vector as the label
-        time course. The scaling is performed such that the power of the label
-        time course is the same as the average per-vertex time course power
-        within the label. The sign of the resulting time course is adjusted by
-        multiplying it with "sign(dot(u, flip))" where u is the first
-        left-singular vector, and flip is a sing-flip vector based on the
-        vertex normals. This procedure assures that the phase does not
-        randomly change by 180 degrees from one stc to the next.
-
-        See also mne.extract_label_time_course to extract time courses for a
-        list of SourceEstimate more efficiently.
+
+            - 'mean': Average within each label.
+            - 'mean_flip': Average within each label with sign flip depending
+              on source orientation.
+            - 'pca_flip': Apply an SVD to the time courses within each label
+              and use the scaled and sign-flipped first right-singular vector
+              as the label time course. The scaling is performed such that the
+              power of the label time course is the same as the average
+              per-vertex time course power within the label. The sign of the
+              resulting time course is adjusted by multiplying it with
+              "sign(dot(u, flip))" where u is the first left-singular vector,
+              and flip is a sign-flip vector based on the vertex normals. This
+              procedure assures that the phase does not randomly change by 180
+              degrees from one stc to the next.
+            - 'max': Max value within each label.
+
 
         Parameters
         ----------
@@ -1228,6 +1174,10 @@ class SourceEstimate(_BaseSourceEstimate):
         -------
         label_tc : array, shape=(len(labels), n_times)
             Extracted time course for each label.
+
+        See Also
+        --------
+        extract_label_time_course : extract time courses for multiple STCs
         """
         label_tc = extract_label_time_course(self, labels, src, mode=mode,
                                              return_generator=False,
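
Editor's note: the extraction modes documented above boil down to simple reductions per label. A hedged NumPy sketch of 'mean', 'mean_flip', and 'max' for one label; the flip vector here is random, whereas the real one is derived from the vertex normals:

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.randn(5, 100)         # 5 label vertices over time
    flip = np.sign(rng.randn(5))     # stand-in orientation-based flips

    mean_tc = np.mean(data, axis=0)                             # 'mean'
    mean_flip_tc = np.mean(flip[:, np.newaxis] * data, axis=0)  # 'mean_flip'
    max_tc = np.max(data, axis=0)                               # 'max'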
@@ -1285,18 +1235,18 @@ class SourceEstimate(_BaseSourceEstimate):
         subject = _check_subject(self.subject, subject)
 
         values = np.sum(self.data, axis=1)  # sum across time
-        vert_inds = [np.arange(len(self.vertno[0])),
-                     np.arange(len(self.vertno[1])) + len(self.vertno[0])]
+        vert_inds = [np.arange(len(self.vertices[0])),
+                     np.arange(len(self.vertices[1])) + len(self.vertices[0])]
         if hemi is None:
             hemi = np.where(np.array([np.sum(values[vi])
                             for vi in vert_inds]))[0]
             if not len(hemi) == 1:
                 raise ValueError('Could not infer hemisphere')
             hemi = hemi[0]
-        if not hemi in [0, 1]:
+        if hemi not in [0, 1]:
             raise ValueError('hemi must be 0 or 1')
 
-        subjects_dir = get_subjects_dir(subjects_dir)
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
 
         values = values[vert_inds[hemi]]
 
@@ -1310,12 +1260,12 @@ class SourceEstimate(_BaseSourceEstimate):
         if restrict_vertices is False:
             restrict_vertices = np.arange(surf[0].shape[0])
         elif restrict_vertices is True:
-            restrict_vertices = self.vertno[hemi]
+            restrict_vertices = self.vertices[hemi]
 
         if np.any(self.data < 0):
             raise ValueError('Cannot compute COM with negative values')
 
-        pos = surf[0][self.vertno[hemi], :].T
+        pos = surf[0][self.vertices[hemi], :].T
         c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
 
         # Find the vertex closest to the COM
@@ -1330,11 +1280,10 @@ class SourceEstimate(_BaseSourceEstimate):
         return vertex, hemi, t
 
     def plot(self, subject=None, surface='inflated', hemi='lh',
-             colormap='hot', time_label='time=%0.2f ms',
-             smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
-             transparent=True, alpha=1.0, time_viewer=False,
-             config_opts={}, subjects_dir=None, figure=None,
-             views='lat', colorbar=True):
+             colormap='auto', time_label='time=%0.2f ms',
+             smoothing_steps=10, transparent=None, alpha=1.0,
+             time_viewer=False, config_opts=None, subjects_dir=None,
+             figure=None, views='lat', colorbar=True, clim='auto'):
         """Plot SourceEstimates with PySurfer
 
         Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
@@ -1347,8 +1296,6 @@ class SourceEstimate(_BaseSourceEstimate):
 
         Parameters
         ----------
-        stc : SourceEstimates
-            The source estimates to plot.
         subject : str | None
             The subject name corresponding to FreeSurfer environment
             variable SUBJECT. If None stc.subject will be used. If that
@@ -1356,22 +1303,19 @@ class SourceEstimate(_BaseSourceEstimate):
         surface : str
             The type of surface (inflated, white etc.).
         hemi : str, 'lh' | 'rh' | 'split' | 'both'
-            The hemisphere to display. Using 'both' or 'split' requires
-            PySurfer version 0.4 or above.
-        colormap : str
-            The type of colormap to use.
+            The hemisphere to display.
+        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
+            Name of colormap to use or a custom look up table. If array, must
+            be an (n x 3) or (n x 4) array with RGB or RGBA values between
+            0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
+            based on whether 'lims' or 'pos_lims' are specified in `clim`.
         time_label : str
             How to print info about the time instant visualized.
         smoothing_steps : int
             The amount of smoothing.
-        fmin : float
-            The minimum value to display.
-        fmid : float
-            The middle value on the colormap.
-        fmax : float
-            The maximum value for the colormap.
-        transparent : bool
+        transparent : bool | None
             If True, use a linear transparency between the lower and middle colormap limits.
+            None will choose automatically based on colormap type.
         alpha : float
             Alpha value to apply globally to the overlay.
         time_viewer : bool
@@ -1389,6 +1333,21 @@ class SourceEstimate(_BaseSourceEstimate):
             View to use. See surfer.Brain().
         colorbar : bool
             If True, display colorbar on scene.
+        clim : str | dict
+            Colorbar properties specification. If 'auto', set clim
+            automatically based on data percentiles. If dict, should contain:
+
+                kind : str
+                    Flag to specify type of limits. 'value' or 'percent'.
+                lims : list | np.ndarray | tuple of float, 3 elements
+                    Note: Only use this if 'colormap' is not 'mne'.
+                    Left, middle, and right bound for colormap.
+                pos_lims : list | np.ndarray | tuple of float, 3 elements
+                    Note: Only use this if 'colormap' is 'mne'.
+                    Left, middle, and right bound for colormap. Positive values
+                    will be mirrored directly across zero during colormap
+                    construction to obtain negative control points.
+
 
         Returns
         -------
@@ -1396,15 +1355,53 @@ class SourceEstimate(_BaseSourceEstimate):
             An instance of surfer.viz.Brain from PySurfer.
         """
         brain = plot_source_estimates(self, subject, surface=surface,
-                        hemi=hemi, colormap=colormap, time_label=time_label,
-                        smoothing_steps=smoothing_steps, fmin=fmin, fmid=fmid,
-                        fmax=fmax, transparent=transparent, alpha=alpha,
-                        time_viewer=time_viewer, config_opts=config_opts,
-                        subjects_dir=subjects_dir, figure=figure, views=views,
-                        colorbar=colorbar)
+                                      hemi=hemi, colormap=colormap,
+                                      time_label=time_label,
+                                      smoothing_steps=smoothing_steps,
+                                      transparent=transparent, alpha=alpha,
+                                      time_viewer=time_viewer,
+                                      config_opts=config_opts,
+                                      subjects_dir=subjects_dir, figure=figure,
+                                      views=views, colorbar=colorbar,
+                                      clim=clim)
         return brain
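
Editor's note: an illustrative use of the new clim parameter documented above (not runnable stand-alone, since it needs a SourceEstimate and PySurfer):

    # value-based limits; 'auto' colormap resolves to 'hot' when 'lims' given
    brain = stc.plot(hemi='lh', clim=dict(kind='value', lims=[5., 10., 15.]))

    # percentile-based, mirrored limits with the 'mne' colormap
    brain = stc.plot(hemi='both', colormap='mne',
                     clim=dict(kind='percent', pos_lims=[96., 97.5, 99.95]))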
 
     @verbose
+    def to_original_src(self, src_orig, subject_orig=None,
+                        subjects_dir=None, verbose=None):
+        """Return a SourceEstimate from morphed source to the original subject
+
+        Parameters
+        ----------
+        src_orig : instance of SourceSpaces
+            The original source spaces that were morphed to the current
+            subject.
+        subject_orig : str | None
+            The original subject. For most source spaces this shouldn't need
+            to be provided, since it is stored in the source space itself.
+        subjects_dir : str | None
+            Path to SUBJECTS_DIR if it is not set in the environment.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        See Also
+        --------
+        morph_source_spaces
+
+        Notes
+        -----
+        .. versionadded:: 0.10.0
+        """
+        if self.subject is None:
+            raise ValueError('stc.subject must be set')
+        src_orig = _ensure_src(src_orig)
+        subject_orig = _ensure_src_subject(src_orig, subject_orig)
+        data_idx, vertices = _get_morph_src_reordering(
+            self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
+        return SourceEstimate(self._data[data_idx], vertices,
+                              self.tmin, self.tstep, subject_orig)
+
+    @verbose
     def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
               buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
               verbose=None):
@@ -1414,8 +1411,6 @@ class SourceEstimate(_BaseSourceEstimate):
         ----------
         subject_to : string
             Name of the subject on which to morph as named in the SUBJECTS_DIR
-        stc_from : SourceEstimate
-            Source estimates for subject "from" to morph
         grade : int, list (of two arrays), or None
             Resolution of the icosahedral mesh (typically 5). If None, all
             vertices will be used (potentially filling the surface). If a list,
@@ -1491,6 +1486,8 @@ class SourceEstimate(_BaseSourceEstimate):
                  vert_as_index=False, time_as_index=False):
         """Get location and latency of peak amplitude
 
+        Parameters
+        ----------
         hemi : {'lh', 'rh', None}
             The hemi to be considered. If None, the entire source space is
             considered.
@@ -1520,7 +1517,7 @@ class SourceEstimate(_BaseSourceEstimate):
         """
         data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
         vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
-                  None: np.concatenate(self.vertno)}[hemi]
+                  None: np.concatenate(self.vertices)}[hemi]
 
         vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
 
@@ -1556,19 +1553,23 @@ class VolSourceEstimate(_BaseSourceEstimate):
         The subject name.
     times : array of shape (n_times,)
         The time vector.
-    vertno : array of shape (n_dipoles,)
+    vertices : array of shape (n_dipoles,)
         The indices of the dipoles in the source space.
     data : array of shape (n_dipoles, n_times)
         The data in source space.
     shape : tuple
         The shape of the data. A tuple of int (n_dipoles, n_times).
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
     @verbose
     def __init__(self, data, vertices=None, tmin=None, tstep=None,
                  subject=None, verbose=None):
 
-        if not (isinstance(vertices, np.ndarray) or isinstance(vertices, list)
-                and len(vertices) == 1):
+        if not (isinstance(vertices, np.ndarray) or
+                isinstance(vertices, list) and len(vertices) == 1):
             raise ValueError('Vertices must be a numpy array or a list with '
                              'one array')
 
@@ -1597,17 +1598,15 @@ class VolSourceEstimate(_BaseSourceEstimate):
 
         if ftype == 'stc':
             logger.info('Writing STC to disk...')
-            if not (fname.endswith('-vl.stc')
-                    or fname.endswith('-vol.stc')):
+            if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
                 fname += '-vl.stc'
             _write_stc(fname, tmin=self.tmin, tstep=self.tstep,
-                       vertices=self.vertno, data=self.data)
+                       vertices=self.vertices, data=self.data)
         elif ftype == 'w':
             logger.info('Writing STC to disk (w format)...')
-            if not (fname.endswith('-vl.w')
-                    or fname.endswith('-vol.w')):
+            if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
                 fname += '-vl.w'
-            _write_w(fname, vertices=self.vertno, data=self.data)
+            _write_w(fname, vertices=self.vertices, data=self.data)
 
         logger.info('[done]')
 
@@ -1633,6 +1632,10 @@ class VolSourceEstimate(_BaseSourceEstimate):
         -------
         img : instance of Nifti1Image
             The image object.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
         """
         save_stc_as_volume(fname, self, src, dest=dest,
                            mri_resolution=mri_resolution)
@@ -1657,15 +1660,19 @@ class VolSourceEstimate(_BaseSourceEstimate):
         -------
         img : instance of Nifti1Image
             The image object.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
         """
         return save_stc_as_volume(None, self, src, dest=dest,
                                   mri_resolution=mri_resolution)
 
     def __repr__(self):
-        if isinstance(self.vertno, list):
-            nv = sum([len(v) for v in self.vertno])
+        if isinstance(self.vertices, list):
+            nv = sum([len(v) for v in self.vertices])
         else:
-            nv = self.vertno.size
+            nv = self.vertices.size
         s = "%d vertices" % nv
         if self.subject is not None:
             s += ", subject : %s" % self.subject
@@ -1679,6 +1686,8 @@ class VolSourceEstimate(_BaseSourceEstimate):
                  vert_as_index=False, time_as_index=False):
         """Get location and latency of peak amplitude
 
+        Parameters
+        ----------
         tmin : float | None
             The minimum point in time to be considered for peak getting.
         tmax : float | None
@@ -1706,69 +1715,155 @@ class VolSourceEstimate(_BaseSourceEstimate):
         vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
                                        mode)
 
-        return (vert_idx if vert_as_index else self.vertno[vert_idx],
+        return (vert_idx if vert_as_index else self.vertices[vert_idx],
                 time_idx if time_as_index else self.times[time_idx])
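
Editor's note: in its default mode, get_peak amounts to an argmax over absolute amplitude; a rough sketch under that assumption (the real _get_peak also applies the tmin/tmax mask and supports other modes):

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.randn(20, 100)            # (n_dipoles, n_times)
    times = np.arange(100) / 1000.

    vert_idx, time_idx = np.unravel_index(np.argmax(np.abs(data)),
                                          data.shape)
    peak_latency = times[time_idx]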
 
 
-###############################################################################
-# Morphing
-
-def mesh_edges(tris):
-    """Returns sparse matrix with edges as an adjacency matrix
+class MixedSourceEstimate(_BaseSourceEstimate):
+    """Container for mixed surface and volume source estimates
 
     Parameters
     ----------
-    tris : array of shape [n_triangles x 3]
-        The triangles.
+    data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
+        The data in source space. The data can either be a single array or
+        a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
+        "sens_data" shape (n_sensors, n_times). In this case, the source
+        space data corresponds to "numpy.dot(kernel, sens_data)".
+    vertices : list of arrays
+        Vertex numbers corresponding to the data.
+    tmin : scalar
+        Time point of the first sample in data.
+    tstep : scalar
+        Time step between successive samples in data.
+    subject : str | None
+        The subject name. While not necessary, it is safer to set the
+        subject parameter to avoid analysis errors.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Attributes
+    ----------
+    subject : str | None
+        The subject name.
+    times : array of shape (n_times,)
+        The time vector.
+    vertices : list of arrays of shape (n_dipoles,)
+        The indices of the dipoles in each source space.
+    data : array of shape (n_dipoles, n_times)
+        The data in source space.
+    shape : tuple
+        The shape of the data. A tuple of int (n_dipoles, n_times).
 
-    Returns
-    -------
-    edges : sparse matrix
-        The adjacency matrix.
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
-    npoints = np.max(tris) + 1
-    ones_ntris = np.ones(3 * len(tris))
-    a, b, c = tris.T
-    x = np.concatenate((a, b, c))
-    y = np.concatenate((b, c, a))
-    edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
-    edges = edges.tocsr()
-    edges = edges + edges.T
-    return edges
+    @verbose
+    def __init__(self, data, vertices=None, tmin=None, tstep=None,
+                 subject=None, verbose=None):
 
+        if not isinstance(vertices, list) or len(vertices) < 2:
+            raise ValueError('Vertices must be a list of numpy arrays with '
+                             'one array per source space.')
 
-def mesh_dist(tris, vert):
-    """Compute adjacency matrix weighted by distances
+        _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
+                                     tstep=tstep, subject=subject,
+                                     verbose=verbose)
 
-    It generates an adjacency matrix where the entries are the distances
-    between neighboring vertices.
+    def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
+                     colormap='auto', time_label='time=%02.f ms',
+                     smoothing_steps=10,
+                     transparent=None, alpha=1.0, time_viewer=False,
+                     config_opts={}, subjects_dir=None, figure=None,
+                     views='lat', colorbar=True, clim='auto'):
+        """Plot surface source estimates with PySurfer
 
-    Parameters
-    ----------
-    tris : array (n_tris x 3)
-        Mesh triangulation
-    vert : array (n_vert x 3)
-        Vertex locations
+        Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
+        which will automatically be set by this function. Plotting multiple
+        SourceEstimates with different values for subjects_dir will cause
+        PySurfer to use the wrong FreeSurfer surfaces when using methods of
+        the returned Brain object. It is therefore recommended to set the
+        SUBJECTS_DIR environment variable or always use the same value for
+        subjects_dir (within the same Python session).
 
-    Returns
-    -------
-    dist_matrix : scipy.sparse.csr_matrix
-        Sparse matrix with distances between adjacent vertices
-    """
-    edges = mesh_edges(tris).tocoo()
+        Parameters
+        ----------
+        src : SourceSpaces
+            The source spaces to plot.
+        subject : str | None
+            The subject name corresponding to FreeSurfer environment
+            variable SUBJECT. If None, stc.subject will be used. If that
+            is None, the environment will be used.
+        surface : str
+            The type of surface (inflated, white etc.).
+        hemi : str, 'lh' | 'rh' | 'split' | 'both'
+            The hemisphere to display. Using 'both' or 'split' requires
+            PySurfer version 0.4 or above.
+        colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
+            Name of colormap to use. See `plot_source_estimates`.
+        time_label : str
+            How to print info about the time instant visualized.
+        smoothing_steps : int
+            The amount of smoothing.
+        transparent : bool | None
+            If True, use a linear transparency between fmin and fmid.
+            None will choose automatically based on colormap type.
+        alpha : float
+            Alpha value to apply globally to the overlay.
+        time_viewer : bool
+            Display time viewer GUI.
+        config_opts : dict
+            Keyword arguments for Brain initialization.
+            See pysurfer.viz.Brain.
+        subjects_dir : str
+            The path to the FreeSurfer subjects reconstructions.
+            It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
+        figure : instance of mayavi.core.scene.Scene | None
+            If None, the last figure will be cleaned and a new figure will
+            be created.
+        views : str | list
+            View to use. See surfer.Brain().
+        colorbar : bool
+            If True, display colorbar on scene.
+        clim : str | dict
+            Colorbar properties specification. See `plot_source_estimates`.
 
-    # Euclidean distances between neighboring vertices
-    dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
-                          axis=1))
+        Returns
+        -------
+        brain : Brain
+            An instance of surfer.viz.Brain from PySurfer.
+        """
+
+        # extract surface source spaces
+        src = _ensure_src(src)
+        surf = [s for s in src if s['type'] == 'surf']
+        if len(surf) != 2:
+            raise ValueError('Source space must contain exactly two surfaces.')
+
+        # extract surface source estimate
+        data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
+        vertices = [s['vertno'] for s in surf]
+
+        stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
+                             self.subject, self.verbose)
 
-    dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
+        return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
+                                     colormap=colormap, time_label=time_label,
+                                     smoothing_steps=smoothing_steps,
+                                     transparent=transparent, alpha=alpha,
+                                     time_viewer=time_viewer,
+                                     config_opts=config_opts,
+                                     subjects_dir=subjects_dir, figure=figure,
+                                     views=views, colorbar=colorbar, clim=clim)
 
-    return dist_matrix
+
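
A hedged sketch of the new method in action; it keeps the two 'surf' spaces
of a mixed source space and forwards them to plot_source_estimates, so
PySurfer and the matching SourceSpaces object are assumed::

    brain = stc_mixed.plot_surface(src, subject='sample', hemi='lh',
                                   subjects_dir=subjects_dir,
                                   time_viewer=True)
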
+###############################################################################
+# Morphing
 
 
 @verbose
 def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
-                  verbose=None):
+                  warn=True, verbose=None):
     """Morph data from one subject's source space to another
 
     Parameters
@@ -1788,6 +1883,8 @@ def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
         Vertices on the destination surface to use.
     maps : sparse matrix
         Morph map from one subject to the other.
+    warn : bool
+        If True, warn if not all vertices were used.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1854,6 +1951,10 @@ def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
         data.data /= data_sum.repeat(np.diff(data.indptr))
     else:
         data[idx_use, :] /= data_sum[idx_use][:, None]
+    if len(idx_use) != len(data_sum) and warn:
+        warnings.warn('%s/%s vertices not included in smoothing, consider '
+                      'increasing the number of steps'
+                      % (len(data_sum) - len(idx_use), len(data_sum)))
 
     logger.info('    %d smooth iterations done.' % (k + 1))
     data_morphed = maps[nearest, :] * data
@@ -1930,24 +2031,25 @@ def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
 
     cnt = 0
     for k, hemi in enumerate(['lh', 'rh']):
-        if stc.vertno[k].size > 0:
+        if stc.vertices[k].size > 0:
             map_hemi = maps[k]
-            vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertno[k]])
+            vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertices[k]])
             order = np.argsort(vertno_k)
             n_active_hemi = len(vertno_k)
             data_hemi = stc_morph._data[cnt:cnt + n_active_hemi]
             stc_morph._data[cnt:cnt + n_active_hemi] = data_hemi[order]
-            stc_morph.vertno[k] = vertno_k[order]
+            stc_morph.vertices[k] = vertno_k[order]
             cnt += n_active_hemi
         else:
-            stc_morph.vertno[k] = np.array([], dtype=np.int64)
+            stc_morph.vertices[k] = np.array([], int)
 
     return stc_morph
 
 
 @verbose
 def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
-               subjects_dir=None, buffer_size=64, n_jobs=1, verbose=None):
+               subjects_dir=None, buffer_size=64, n_jobs=1, warn=True,
+               verbose=None):
     """Morph a source estimate from one subject to another
 
     Parameters
@@ -1979,6 +2081,8 @@ def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
         Saves memory when morphing long time intervals.
     n_jobs : int
         Number of jobs to run in parallel
+    warn : bool
+        If True, warn if not all vertices were used.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1992,7 +2096,7 @@ def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
                          'estimates')
 
     logger.info('Morphing data...')
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
     tris = _get_subject_sphere_tris(subject_from, subjects_dir)
     maps = read_morph_map(subject_from, subject_to, subjects_dir)
@@ -2010,12 +2114,13 @@ def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
         e.data[e.data == 2] = 1
         n_vertices = e.shape[0]
         e = e + sparse.eye(n_vertices, n_vertices)
-        idx_use = stc_from.vertno[hemi]
+        idx_use = stc_from.vertices[hemi]
         if len(idx_use) == 0:
             continue
         data_morphed[hemi] = np.concatenate(
             parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
-                                     n_vertices, nearest[hemi], maps[hemi])
+                                     n_vertices, nearest[hemi], maps[hemi],
+                                     warn=warn)
                      for data_buffer
                      in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
 
@@ -2023,13 +2128,13 @@ def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
     if data_morphed[0] is None:
         if data_morphed[1] is None:
             data = np.r_[[], []]
-            vertices = [np.array([], dtype=int), np.array([], dtype=int)]
+            vertices = [np.array([], int), np.array([], int)]
         else:
             data = data_morphed[1]
-            vertices = [np.array([], dtype=int), vertices[1]]
+            vertices = [np.array([], int), vertices[1]]
     elif data_morphed[1] is None:
         data = data_morphed[0]
-        vertices = [vertices[0], np.array([], dtype=int)]
+        vertices = [vertices[0], np.array([], int)]
     else:
         data = np.r_[data_morphed[0], data_morphed[1]]
 
@@ -2042,7 +2147,8 @@ def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
 
 @verbose
 def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
-                         smooth=None, subjects_dir=None, verbose=None):
+                         smooth=None, subjects_dir=None, warn=True,
+                         verbose=None):
     """Get a matrix that morphs data from one subject to another
 
     Parameters
@@ -2061,6 +2167,8 @@ def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
         with non-zero values.
     subjects_dir : string
         Path to SUBJECTS_DIR if it is not set in the environment.
+    warn : bool
+        If True, warn if not all vertices were used.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -2070,7 +2178,7 @@ def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
         matrix that morphs data from subject_from to subject_to
     """
     logger.info('Computing morph matrix...')
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     tris = _get_subject_sphere_tris(subject_from, subjects_dir)
     maps = read_morph_map(subject_from, subject_to, subjects_dir)
 
@@ -2086,7 +2194,7 @@ def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
             continue
         m = sparse.eye(len(idx_use), len(idx_use), format='csr')
         morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
-                                      vertices_to[hemi], maps[hemi])
+                                      vertices_to[hemi], maps[hemi], warn=warn)
     # be careful about zero-length arrays
     if isinstance(morpher[0], list):
         morpher = morpher[1]
@@ -2132,7 +2240,7 @@ def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
     # add special case for fsaverage for speed
     if subject == 'fsaverage' and grade == 5:
         return [np.arange(10242), np.arange(10242)]
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
 
     spheres_to = [os.path.join(subjects_dir, subject, 'surf',
                                xh + '.sphere.reg') for xh in ['lh', 'rh']]
@@ -2236,9 +2344,9 @@ def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
     """
     if dist is None:
         if src[0]['use_tris'] is None:
-            raise Exception("The source space does not appear to be an ico "
-                            "surface. Connectivity cannot be extracted from "
-                            "non-ico source spaces.")
+            raise RuntimeError("The source space does not appear to be an ico "
+                               "surface. Connectivity cannot be extracted from"
+                               " non-ico source spaces.")
         used_verts = [np.unique(s['use_tris']) for s in src]
         lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
         rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
@@ -2295,7 +2403,8 @@ def grade_to_tris(grade, verbose=None):
 
 
 @verbose
-def spatio_temporal_tris_connectivity(tris, n_times, verbose=None):
+def spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,
+                                      verbose=None):
     """Compute connectivity from triangles and time instants
 
     Parameters
@@ -2304,6 +2413,9 @@ def spatio_temporal_tris_connectivity(tris, n_times, verbose=None):
         N x 3 array defining triangles.
     n_times : int
         Number of time points
+    remap_vertices : bool
+        Reassign vertex indices based on unique values. Useful
+        to process a subset of triangles. Defaults to False.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -2316,6 +2428,10 @@ def spatio_temporal_tris_connectivity(tris, n_times, verbose=None):
         vertices are time 1, the nodes from 2 to 2N are the vertices
         during time 2, etc.
     """
+    if remap_vertices:
+        logger.info('Reassigning vertex indices.')
+        tris = np.searchsorted(np.unique(tris), tris)
+
     edges = mesh_edges(tris).tocoo()
     return _get_connectivity_from_edges(edges, n_times)
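
The remap_vertices branch compresses arbitrary vertex ids to 0..N-1 via
np.searchsorted(np.unique(tris), tris); a small self-contained sketch::

    import numpy as np
    from mne import spatio_temporal_tris_connectivity

    tris = np.array([[10, 11, 12], [11, 12, 40]])  # non-contiguous ids
    conn = spatio_temporal_tris_connectivity(tris, n_times=3,
                                             remap_vertices=True)
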
 
@@ -2384,13 +2500,16 @@ def spatial_src_connectivity(src, dist=None, verbose=None):
 
 
 @verbose
-def spatial_tris_connectivity(tris, verbose=None):
+def spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):
     """Compute connectivity from triangles
 
     Parameters
     ----------
     tris : array
         N x 3 array defining triangles.
+    remap_vertices : bool
+        Reassign vertex indices based on unique values. Useful
+        to process a subset of triangles. Defaults to False.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -2399,7 +2518,7 @@ def spatial_tris_connectivity(tris, verbose=None):
     connectivity : sparse COO matrix
         The connectivity matrix describing the spatial graph structure.
     """
-    return spatio_temporal_tris_connectivity(tris, 1)
+    return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)
 
 
 def spatial_dist_connectivity(src, dist, verbose=None):
@@ -2425,38 +2544,6 @@ def spatial_dist_connectivity(src, dist, verbose=None):
     return spatio_temporal_dist_connectivity(src, 1, dist)
 
 
-def sparse_block_diag(mats, format=None, dtype=None):
-    """An implementation of scipy.sparse.block_diag since old versions of
-    scipy don't have it. Forms a sparse matrix by stacking matrices in block
-    diagonal form.
-
-    Parameters
-    ----------
-    mats : list of matrices
-        Input matrices.
-    format : str, optional
-        The sparse format of the result (e.g. "csr"). If not given, the
-        matrix is returned in "coo" format.
-    dtype : dtype specifier, optional
-        The data-type of the output matrix. If not given, the dtype is
-        determined from that of blocks.
-
-    Returns
-    -------
-    res : sparse matrix
-    """
-    try:
-        return sparse.block_diag(mats, format=format, dtype=dtype)
-    except AttributeError:
-        nmat = len(mats)
-        rows = []
-        for ia, a in enumerate(mats):
-            row = [None] * nmat
-            row[ia] = a
-            rows.append(row)
-        return sparse.bmat(rows, format=format, dtype=dtype)
-
-
 @verbose
 def _get_connectivity_from_edges(edges, n_times, verbose=None):
     """Given edges sparse matrix, create connectivity matrix"""
@@ -2467,10 +2554,10 @@ def _get_connectivity_from_edges(edges, n_times, verbose=None):
     col = (edges.col[None, :] + aux).ravel()
     row = (edges.row[None, :] + aux).ravel()
     if n_times > 1:  # add temporal edges
-        o = (n_vertices * np.arange(n_times - 1)[:, None]
-             + np.arange(n_vertices)[None, :]).ravel()
-        d = (n_vertices * np.arange(1, n_times)[:, None]
-             + np.arange(n_vertices)[None, :]).ravel()
+        o = (n_vertices * np.arange(n_times - 1)[:, None] +
+             np.arange(n_vertices)[None, :]).ravel()
+        d = (n_vertices * np.arange(1, n_times)[:, None] +
+             np.arange(n_vertices)[None, :]).ravel()
         row = np.concatenate((row, o, d))
         col = np.concatenate((col, d, o))
     data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
@@ -2593,7 +2680,7 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
 
     n_labels = len(labels)
 
-    # get vertno from source space, they have to be the same as in the stcs
+    # get vertices from source space; they have to be the same as in the stcs
     vertno = [s['vertno'] for s in src]
     nvert = [len(vn) for vn in vertno]
 
@@ -2635,10 +2722,10 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
     if mode == 'mean':
         pass  # we have this here to catch invalid values for mode
     elif mode == 'mean_flip':
-       # get the sign-flip vector for every label
+        # get the sign-flip vector for every label
         label_flip = _get_label_flip(labels, label_vertidx, src)
     elif mode == 'pca_flip':
-       # get the sign-flip vector for every label
+        # get the sign-flip vector for every label
         label_flip = _get_label_flip(labels, label_vertidx, src)
     elif mode == 'max':
         pass  # we calculate the maximum value later
@@ -2647,11 +2734,11 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
 
     # loop through source estimates and extract time series
     for stc in stcs:
-
         # make sure the stc is compatible with the source space
-        if len(stc.vertno[0]) != nvert[0] or len(stc.vertno[1]) != nvert[1]:
+        if len(stc.vertices[0]) != nvert[0] or \
+                len(stc.vertices[1]) != nvert[1]:
             raise ValueError('stc not compatible with source space')
-        if any([np.any(svn != vn) for svn, vn in zip(stc.vertno, vertno)]):
+        if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):
             raise ValueError('stc not compatible with source space')
 
         logger.info('Extracting time courses for %d labels (mode: %s)'
@@ -2704,24 +2791,22 @@ def extract_label_time_course(stcs, labels, src, mode='mean_flip',
     parameter.
 
     Valid values for mode are:
-    --------------------------
 
-    mean : Average within each label.
+        - 'mean': Average within each label.
+        - 'mean_flip': Average within each label with sign flip depending
+          on source orientation.
+        - 'pca_flip': Apply an SVD to the time courses within each label
+          and use the scaled and sign-flipped first right-singular vector
+          as the label time course. The scaling is performed such that the
+          power of the label time course is the same as the average
+          per-vertex time course power within the label. The sign of the
+          resulting time course is adjusted by multiplying it with
+          "sign(dot(u, flip))" where u is the first left-singular vector,
+          and flip is a sing-flip vector based on the vertex normals. This
+          procedure assures that the phase does not randomly change by 180
+          degrees from one stc to the next.
+        - 'max': Max value within each label.
 
-    mean_flip : Average within each label with sign flip depending on source
-        orientation.
-
-    pca_flip : Apply an SVD to the time courses within each label and use the
-        scaled and sign-flipped first right-singular vector as the label time
-        course. The scaling is performed such that the power of the label time
-        course is the same as the average per-vertex time course power within
-        the label. The sign of the resulting time course is adjusted by
-        multiplying it with "sign(dot(u, flip))" where u is the first
-        left-singular vector, and flip is a sing-flip vector based on the
-        vertex normals. This procedure assures that the phase does not randomly
-        change by 180 degrees from one stc to the next.
-
-    max : Max value within each label.
 
     Parameters
     ----------
@@ -2743,10 +2828,9 @@ def extract_label_time_course(stcs, labels, src, mode='mean_flip',
 
     Returns
     -------
-    label_tc : array | list (or generator) of array,
-               shape=(len(labels), n_times)
+    label_tc : array | list (or generator) of array, shape=(len(labels), n_times)
         Extracted time course for each label and source estimate.
-    """
+    """  # noqa
     # convert inputs to lists
     if isinstance(stcs, SourceEstimate):
         stcs = [stcs]
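
A usage sketch for the mode options documented above, assuming labels read
with read_labels_from_annot and stcs obtained from an inverse operator::

    import mne
    labels = mne.read_labels_from_annot('sample', parc='aparc',
                                        subjects_dir=subjects_dir)
    label_tc = mne.extract_label_time_course(stcs, labels, src,
                                             mode='pca_flip')
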
diff --git a/mne/source_space.py b/mne/source_space.py
index 8191e19..4d99e0e 100644
--- a/mne/source_space.py
+++ b/mne/source_space.py
@@ -3,7 +3,6 @@
 #
 # License: BSD (3-clause)
 
-from .externals.six import string_types
 import numpy as np
 import os
 import os.path as op
@@ -18,19 +17,40 @@ from .io.write import (start_block, end_block, write_int,
                        write_float_sparse_rcs, write_string,
                        write_float_matrix, write_int_matrix,
                        write_coord_trans, start_file, end_file, write_id)
+from .bem import read_bem_surfaces
 from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
-                      _tessellate_sphere_surf, read_bem_surfaces,
+                      _tessellate_sphere_surf, _get_surf_neighbors,
                       _read_surface_geom, _normalize_vectors,
                       _complete_surface_info, _compute_nearest,
-                      fast_cross_3d)
-from .source_estimate import mesh_dist
+                      fast_cross_3d, _fast_cross_nd_sum, mesh_dist,
+                      _triangle_neighbors)
 from .utils import (get_subjects_dir, run_subprocess, has_freesurfer,
                     has_nibabel, check_fname, logger, verbose,
-                    check_scipy_version)
-from .fixes import in1d, partial, gzip_open
+                    check_version, _get_call_line)
+from .fixes import in1d, partial, gzip_open, meshgrid
 from .parallel import parallel_func, check_n_jobs
 from .transforms import (invert_transform, apply_trans, _print_coord_trans,
-                         combine_transforms)
+                         combine_transforms, _get_mri_head_t,
+                         _coord_frame_name, Transform)
+from .externals.six import string_types
+
+
+def _get_lut():
+    """Helper to get the FreeSurfer LUT"""
+    data_dir = op.join(op.dirname(__file__), 'data')
+    lut_fname = op.join(data_dir, 'FreeSurferColorLUT.txt')
+    return np.genfromtxt(lut_fname, dtype=None,
+                         usecols=(0, 1), names=['id', 'name'])
+
+
+def _get_lut_id(lut, label, use_lut):
+    """Helper to convert a label to a LUT ID number"""
+    if not use_lut:
+        return 1
+    assert isinstance(label, string_types)
+    mask = (lut['name'] == label.encode('utf-8'))
+    assert mask.sum() == 1
+    return lut['id'][mask]
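
A hedged sketch of how these private helpers resolve a segmentation name
(17 is the id of Left-Hippocampus in the standard FreeSurferColorLUT)::

    lut = _get_lut()
    hip_id = _get_lut_id(lut, 'Left-Hippocampus', use_lut=True)  # -> 17
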
 
 
 class SourceSpaces(list):
@@ -65,16 +85,27 @@ class SourceSpaces(list):
         for ss in self:
             ss_type = ss['type']
             if ss_type == 'vol':
-                r = ("'vol', shape=%s, n_used=%i"
-                     % (repr(ss['shape']), ss['nuse']))
+                if 'seg_name' in ss:
+                    r = ("'vol' (%s), n_used=%i"
+                         % (ss['seg_name'], ss['nuse']))
+                else:
+                    r = ("'vol', shape=%s, n_used=%i"
+                         % (repr(ss['shape']), ss['nuse']))
             elif ss_type == 'surf':
                 r = "'surf', n_vertices=%i, n_used=%i" % (ss['np'], ss['nuse'])
             else:
                 r = "%r" % ss_type
+            coord_frame = ss['coord_frame']
+            if isinstance(coord_frame, np.ndarray):
+                coord_frame = coord_frame[0]
+            r += ', coordinate_frame=%s' % _coord_frame_name(coord_frame)
             ss_repr.append('<%s>' % r)
         ss_repr = ', '.join(ss_repr)
         return "<SourceSpaces: [{ss}]>".format(ss=ss_repr)
 
+    def __add__(self, other):
+        return SourceSpaces(list.__add__(self, other))
+
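
The new __add__ keeps the SourceSpaces type when concatenating, e.g.::

    src_mixed = src_surf + src_vol  # still a SourceSpaces instance
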
     def copy(self):
         """Make a copy of the source spaces
 
@@ -96,6 +127,288 @@ class SourceSpaces(list):
         """
         write_source_spaces(fname, self)
 
+    @verbose
+    def export_volume(self, fname, include_surfaces=True,
+                      include_discrete=True, dest='mri', trans=None,
+                      mri_resolution=False, use_lut=True, verbose=None):
+        """Exports source spaces to nifti or mgz file
+
+        Parameters
+        ----------
+        fname : str
+            Name of nifti or mgz file to write.
+        include_surfaces : bool
+            If True, include surface source spaces.
+        include_discrete : bool
+            If True, include discrete source spaces.
+        dest : 'mri' | 'surf'
+            If 'mri' the volume is defined in the coordinate system of the
+            original T1 image. If 'surf' the coordinate system of the
+            FreeSurfer surface is used (Surface RAS).
+        trans : dict, str, or None
+            Either a transformation filename (usually made using mne_analyze)
+            or an info dict (usually opened using read_trans()).
+            If string, an ending of `.fif` or `.fif.gz` will be assumed to be
+            in FIF format, any other ending will be assumed to be a text file
+            with a 4x4 transformation matrix (like the `--trans` MNE-C option).
+            Must be provided if source spaces are in head coordinates and
+            include_surfaces and mri_resolution are True.
+        mri_resolution : bool
+            If True, the image is saved in MRI resolution
+            (e.g. 256 x 256 x 256).
+        use_lut : bool
+            If True, assigns a numeric value to each source space that
+            corresponds to a color on the freesurfer lookup table.
+        verbose : bool, str, int, or None
+            If not None, override default verbose level (see mne.verbose).
+
+        Notes
+        -----
+        This method requires nibabel.
+        """
+
+        # import nibabel or raise error
+        try:
+            import nibabel as nib
+        except ImportError:
+            raise ImportError('This function requires nibabel.')
+
+        # Check coordinate frames of each source space
+        coord_frames = np.array([s['coord_frame'] for s in self])
+
+        # Raise error if trans is not provided when head coordinates are used
+        # and mri_resolution and include_surfaces are true
+        if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
+            coords = 'head'  # all sources in head coordinates
+            if mri_resolution and include_surfaces:
+                if trans is None:
+                    raise ValueError('trans containing mri to head transform '
+                                     'must be provided if mri_resolution and '
+                                     'include_surfaces are true and surfaces '
+                                     'are in head coordinates')
+
+            elif trans is not None:
+                logger.info('trans is not needed and will not be used unless '
+                            'include_surfaces and mri_resolution are True.')
+
+        elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
+            coords = 'mri'  # all sources in mri coordinates
+            if trans is not None:
+                logger.info('trans is not needed and will not be used unless '
+                            'sources are in head coordinates.')
+        # Raise error if all sources are not in the same space, or sources are
+        # not in mri or head coordinates
+        else:
+            raise ValueError('All sources must be in head coordinates or all '
+                             'sources must be in mri coordinates.')
+
+        # use lookup table to assign values to source spaces
+        logger.info('Reading FreeSurfer lookup table')
+        # read the lookup table
+        lut = _get_lut()
+
+        # Setup a dictionary of source types
+        src_types = dict(volume=[], surface=[], discrete=[])
+
+        # Populate dictionary of source types
+        for src in self:
+            # volume sources
+            if src['type'] == 'vol':
+                src_types['volume'].append(src)
+            # surface sources
+            elif src['type'] == 'surf':
+                src_types['surface'].append(src)
+            # discrete sources
+            elif src['type'] == 'discrete':
+                src_types['discrete'].append(src)
+            # raise an error if dealing with source type other than volume
+            # surface or discrete
+            else:
+                raise ValueError('Unrecognized source type: %s.' % src['type'])
+
+        # Get shape, inuse array and interpolation matrix from volume sources
+        first_vol = True  # mark the first volume source
+        # Loop through the volume sources
+        for vs in src_types['volume']:
+            # read the lookup table value for segmented volume
+            if 'seg_name' not in vs:
+                raise ValueError('Volume sources should be segments, '
+                                 'not the entire volume.')
+            # find the color value for this volume
+            i = _get_lut_id(lut, vs['seg_name'], use_lut)
+
+            if first_vol:
+                # get the inuse array
+                if mri_resolution:
+                    # read the mri file used to generate volumes
+                    aseg = nib.load(vs['mri_file'])
+
+                    # get the voxel space shape
+                    shape3d = (vs['mri_height'], vs['mri_depth'],
+                               vs['mri_width'])
+
+                    # get the values for this volume
+                    inuse = i * (aseg.get_data() == i).astype(int)
+                    # store as 1D array
+                    inuse = inuse.ravel((2, 1, 0))
+
+                else:
+                    inuse = i * vs['inuse']
+
+                    # get the volume source space shape
+                    shape = vs['shape']
+
+                    # read the shape in reverse order
+                    # (otherwise results are scrambled)
+                    shape3d = (shape[2], shape[1], shape[0])
+
+                first_vol = False
+
+            else:
+                # update the inuse array
+                if mri_resolution:
+
+                    # get the values for this volume
+                    use = i * (aseg.get_data() == i).astype(int)
+                    inuse += use.ravel((2, 1, 0))
+                else:
+                    inuse += i * vs['inuse']
+
+        # Raise error if there are no volume source spaces
+        if first_vol:
+            raise ValueError('Source spaces must contain at least one volume.')
+
+        # create 3d grid in the MRI_VOXEL coordinate frame
+        # len of inuse array should match shape regardless of mri_resolution
+        assert len(inuse) == np.prod(shape3d)
+
+        # setup the image in 3d space
+        img = inuse.reshape(shape3d).T
+
+        # include surface and/or discrete source spaces
+        if include_surfaces or include_discrete:
+
+            # setup affine transform for source spaces
+            if mri_resolution:
+                # get the MRI to MRI_VOXEL transform
+                affine = invert_transform(vs['vox_mri_t'])
+            else:
+                # get the MRI to SOURCE (MRI_VOXEL) transform
+                affine = invert_transform(vs['src_mri_t'])
+
+            # modify affine if in head coordinates
+            if coords == 'head':
+
+                # read mri -> head transformation
+                mri_head_t = _get_mri_head_t(trans)[0]
+
+                # get the HEAD to MRI transform
+                head_mri_t = invert_transform(mri_head_t)
+
+                # combine transforms, from HEAD to MRI_VOXEL
+                affine = combine_transforms(head_mri_t, affine,
+                                            'head', 'mri_voxel')
+
+            # loop through the surface source spaces
+            if include_surfaces:
+
+                # get the surface names (assumes left, right order; we may
+                # want to add these names during source space generation)
+                surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex']
+
+                for i, surf in enumerate(src_types['surface']):
+                    # convert vertex positions from their native space
+                    # (either HEAD or MRI) to MRI_VOXEL space
+                    srf_rr = apply_trans(affine['trans'], surf['rr'])
+                    # convert to numeric indices
+                    ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int)
+                    # clip indices outside of volume space
+                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
+                                         0)
+                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
+                                         0)
+                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
+                                         0)
+                    # compare original and clipped indices
+                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
+                                       iz_orig != iz_clip)).any(0).sum()
+                    # generate use warnings for clipping
+                    if n_diff > 0:
+                        logger.warning('%s surface vertices lie outside '
+                                       'of volume space. Consider using a '
+                                       'larger volume space.' % n_diff)
+                    # get surface id or use default value
+                    i = _get_lut_id(lut, surf_names[i], use_lut)
+                    # update image to include surface voxels
+                    img[ix_clip, iy_clip, iz_clip] = i
+
+            # loop through discrete source spaces
+            if include_discrete:
+                for i, disc in enumerate(src_types['discrete']):
+                    # convert vertex positions from their native space
+                    # (either HEAD or MRI) to MRI_VOXEL space
+                    disc_rr = apply_trans(affine['trans'], disc['rr'])
+                    # convert to numeric indices
+                    ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
+                    # clip indices outside of volume space
+                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
+                                         0)
+                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
+                                         0)
+                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
+                                         0)
+                    # compare original and clipped indices
+                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
+                                       iz_orig != iz_clip)).any(0).sum()
+                    # generate use warnings for clipping
+                    if n_diff > 0:
+                        logger.warning('%s discrete vertices lie outside '
+                                       'of volume space. Consider using a '
+                                       'larger volume space.' % n_diff)
+                    # set default value
+                    img[ix_clip, iy_clip, iz_clip] = 1
+                    if use_lut:
+                        logger.info('Discrete sources do not have values on '
+                                    'the lookup table. Defaulting to 1.')
+
+        # calculate affine transform for image (MRI_VOXEL to RAS)
+        if mri_resolution:
+            # MRI_VOXEL to MRI transform
+            transform = vs['vox_mri_t'].copy()
+        else:
+            # MRI_VOXEL to MRI transform
+            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
+            transform = vs['src_mri_t'].copy()
+        if dest == 'mri':
+            # combine with MRI to RAS transform
+            transform = combine_transforms(transform, vs['mri_ras_t'],
+                                           transform['from'],
+                                           vs['mri_ras_t']['to'])
+        # now setup the affine for volume image
+        affine = transform['trans']
+        # make sure affine converts from m to mm
+        affine[:3] *= 1e3
+
+        # save volume data
+
+        # setup image for file
+        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
+            # setup the nifti header
+            hdr = nib.Nifti1Header()
+            hdr.set_xyzt_units('mm')
+            # save the nifti image
+            img = nib.Nifti1Image(img, affine, header=hdr)
+        elif fname.endswith('.mgz'):  # save as mgh
+            # convert to float32 (float64 not currently supported)
+            img = img.astype('float32')
+            # save the mgh image
+            img = nib.freesurfer.mghformat.MGHImage(img, affine)
+        else:
+            raise ValueError('Unrecognized file extension')
+
+        # write image to file
+        nib.save(img, fname)
+
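
A minimal usage sketch for export_volume (file names illustrative; trans is
only required for head-coordinate sources when mri_resolution and
include_surfaces are enabled, per the checks above)::

    src.export_volume('sample_sources.nii.gz', mri_resolution=True,
                      trans='sample-trans.fif')
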
 
 def _add_patch_info(s):
     """Patch information in a source space
@@ -138,7 +451,8 @@ def _add_patch_info(s):
 
 
 @verbose
-def read_source_spaces_from_tree(fid, tree, add_geom=False, verbose=None):
+def _read_source_spaces_from_tree(fid, tree, patch_stats=False,
+                                  verbose=None):
     """Read the source spaces from a FIF file
 
     Parameters
@@ -147,8 +461,8 @@ def read_source_spaces_from_tree(fid, tree, add_geom=False, verbose=None):
         An open file descriptor.
     tree : dict
         The FIF tree structure if source is a file id.
-    add_geom : bool, optional (default False)
-        Add geometry information to the surfaces.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -167,19 +481,17 @@ def read_source_spaces_from_tree(fid, tree, add_geom=False, verbose=None):
         logger.info('    Reading a source space...')
         this = _read_one_source_space(fid, s)
         logger.info('    [done]')
-        if add_geom:
+        if patch_stats:
             _complete_source_space_info(this)
 
         src.append(this)
 
-    src = SourceSpaces(src)
     logger.info('    %d source spaces read' % len(spaces))
-
-    return src
+    return SourceSpaces(src)
 
 
 @verbose
-def read_source_spaces(fname, add_geom=False, verbose=None):
+def read_source_spaces(fname, patch_stats=False, verbose=None):
     """Read the source spaces from a FIF file
 
     Parameters
@@ -187,8 +499,8 @@ def read_source_spaces(fname, add_geom=False, verbose=None):
     fname : str
         The name of the file, which should end with -src.fif or
         -src.fif.gz.
-    add_geom : bool, optional (default False)
-        Add geometry information to the surfaces.
+    patch_stats : bool, optional (default False)
+        Calculate and add cortical patch statistics to the surfaces.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -196,6 +508,10 @@ def read_source_spaces(fname, add_geom=False, verbose=None):
     -------
     src : SourceSpaces
         The source spaces.
+
+    See Also
+    --------
+    write_source_spaces, setup_source_space, setup_volume_source_space
     """
     # be more permissive on read than write (fwd/inv can contain src)
     check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
@@ -204,8 +520,8 @@ def read_source_spaces(fname, add_geom=False, verbose=None):
 
     ff, tree, _ = fiff_open(fname)
     with ff as fid:
-        src = read_source_spaces_from_tree(fid, tree, add_geom=add_geom,
-                                           verbose=verbose)
+        src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
+                                            verbose=verbose)
         src.info['fname'] = fname
         node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
         if node:
@@ -304,6 +620,25 @@ def _read_one_source_space(fid, this, verbose=None):
         if tag is not None:
             res['mri_depth'] = int(tag.data)
 
+        tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
+        if tag is not None:
+            res['mri_volume_name'] = tag.data
+
+        tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
+        if tag is not None:
+            nneighbors = tag.data
+            tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
+            offset = 0
+            neighbors = []
+            for n in nneighbors:
+                neighbors.append(tag.data[offset:offset + n])
+                offset += n
+            res['neighbor_vert'] = neighbors
+
+        tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
+        if tag is not None:
+            res['seg_name'] = tag.data
+
     tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
     if tag is None:
         raise ValueError('Number of vertices not found')
@@ -487,13 +822,14 @@ def label_src_vertno_sel(label, src):
 
     Returns
     -------
-    vertno : list of length 2
+    vertices : list of length 2
         Vertex numbers for lh and rh
-    src_sel : array of int (len(idx) = len(vertno[0]) + len(vertno[1]))
+    src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
         Indices of the selected vertices in source space
     """
     if src[0]['type'] != 'surf':
-        return Exception('Label are only supported with surface source spaces')
+        raise ValueError('Labels are only supported with surface source '
+                         'spaces')
 
     vertno = [src[0]['vertno'], src[1]['vertno']]
 
@@ -501,11 +837,11 @@ def label_src_vertno_sel(label, src):
         vertno_sel = np.intersect1d(vertno[0], label.vertices)
         src_sel = np.searchsorted(vertno[0], vertno_sel)
         vertno[0] = vertno_sel
-        vertno[1] = np.array([])
+        vertno[1] = np.array([], int)
     elif label.hemi == 'rh':
         vertno_sel = np.intersect1d(vertno[1], label.vertices)
         src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
-        vertno[0] = np.array([])
+        vertno[0] = np.array([], int)
         vertno[1] = vertno_sel
     elif label.hemi == 'both':
         vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
@@ -562,6 +898,10 @@ def write_source_spaces(fname, src, verbose=None):
         The source spaces (as returned by read_source_spaces).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    See Also
+    --------
+    read_source_spaces
     """
     check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz'))
 
@@ -674,6 +1014,11 @@ def _write_one_source_space(fid, this, verbose=None):
         write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
                            this['dist_limit'])
 
+    #   Segmentation data
+    if this['type'] == 'vol' and ('seg_name' in this):
+        # Save the name of the segment
+        write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
+
 
 ##############################################################################
 # Surface to MNI conversion
@@ -711,7 +1056,7 @@ def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
     This function requires either nibabel (in Python) or Freesurfer
     (with utility "mri_info") to be correctly installed.
     """
-    if not has_freesurfer and not has_nibabel():
+    if not has_freesurfer() and not has_nibabel():
         raise RuntimeError('NiBabel (Python) or Freesurfer (Unix) must be '
                            'correctly installed and accessible from Python')
 
@@ -724,17 +1069,18 @@ def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, mode=None,
     if not len(hemis) == len(vertices):
         raise ValueError('hemi and vertices must match in length')
 
-    subjects_dir = get_subjects_dir(subjects_dir)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
 
     surfs = [op.join(subjects_dir, subject, 'surf', '%s.white' % h)
              for h in ['lh', 'rh']]
+
+    # read surface locations in MRI space
     rr = [read_surface(s)[0] for s in surfs]
 
-    # take point locations in RAS space and convert to MNI coordinates
+    # take point locations in MRI space and convert to MNI coordinates
     xfm = _read_talxfm(subject, subjects_dir, mode)
-    data = np.array([np.concatenate((rr[h][v, :], [1]))
-                     for h, v in zip(hemis, vertices)]).T
-    return np.dot(xfm, data)[:3, :].T.copy()
+    data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)])
+    return apply_trans(xfm['trans'], data)
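
A short sketch of the rewritten conversion (vertex numbers illustrative;
hemis uses 0 for the left and 1 for the right hemisphere)::

    coords = vertex_to_mni([100, 7500], hemis=[0, 0], subject='sample',
                           subjects_dir=subjects_dir)  # (2, 3), MNI mm
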
 
 
 @verbose
@@ -744,10 +1090,11 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
     Adapted from freesurfer m-files. Altered to deal with Norig
     and Torig correctly.
     """
-    if mode is not None and not mode in ['nibabel', 'freesurfer']:
+    if mode is not None and mode not in ['nibabel', 'freesurfer']:
         raise ValueError('mode must be "nibabel" or "freesurfer"')
     fname = op.join(subjects_dir, subject, 'mri', 'transforms',
                     'talairach.xfm')
+    # read the RAS to MNI transform from talairach.xfm
     with open(fname, 'r') as fid:
         logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)
 
@@ -775,8 +1122,17 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
             raise ValueError('failed to find \'Linear_Transform\' string in '
                              'xfm file:\n%s' % fname)
 
+    # Setup the RAS to MNI transform
+    ras_mni_t = {'from': FIFF.FIFFV_MNE_COORD_RAS,
+                 'to': FIFF.FIFFV_MNE_COORD_MNI_TAL, 'trans': xfm}
+
     # now get Norig and Torig
+    # (i.e. vox_ras_t and vox_mri_t, respectively)
     path = op.join(subjects_dir, subject, 'mri', 'orig.mgz')
+    if not op.isfile(path):
+        path = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    if not op.isfile(path):
+        raise IOError('mri not found: %s' % path)
 
     if has_nibabel():
         use_nibabel = True
@@ -793,7 +1149,9 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
         import nibabel as nib
         img = nib.load(path)
         hdr = img.get_header()
+        # read the MRI_VOXEL to RAS transform
         n_orig = hdr.get_vox2ras()
+        # read the MRI_VOXEL to MRI transform
         ds = np.array(hdr.get_zooms())
         ns = (np.array(hdr.get_data_shape()[:3]) * ds) / 2.0
         t_orig = np.array([[-ds[0], 0, 0, ns[0]],
@@ -809,8 +1167,25 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
             if not stdout.size == 16:
                 raise ValueError('Could not parse Freesurfer mri_info output')
             nt_orig.append(stdout.reshape(4, 4))
-    xfm = np.dot(xfm, np.dot(nt_orig[0], linalg.inv(nt_orig[1])))
-    return xfm
+    # extract the MRI_VOXEL to RAS transform
+    n_orig = nt_orig[0]
+    vox_ras_t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
+                 'to': FIFF.FIFFV_MNE_COORD_RAS,
+                 'trans': n_orig}
+
+    # extract the MRI_VOXEL to MRI transform
+    t_orig = nt_orig[1]
+    vox_mri_t = Transform('mri_voxel', 'mri', t_orig)
+
+    # invert MRI_VOXEL to MRI to get the MRI to MRI_VOXEL transform
+    mri_vox_t = invert_transform(vox_mri_t)
+
+    # construct an MRI to RAS transform
+    mri_ras_t = combine_transforms(mri_vox_t, vox_ras_t, 'mri', 'ras')
+
+    # construct the MRI to MNI transform
+    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, 'mri', 'mni_tal')
+    return mri_mni_t
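
The transform chain assembled above is algebraically the same product the
removed code computed in one step; as a plain-numpy sketch with Norig
(n_orig), Torig (t_orig) and the talairach xfm as read earlier here::

    import numpy as np
    # MRI (surface RAS) -> MNI Talairach, all 4x4 homogeneous matrices
    mri_mni = np.dot(xfm, np.dot(n_orig, np.linalg.inv(t_orig)))
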
 
 
 ###############################################################################
@@ -818,8 +1193,8 @@ def _read_talxfm(subject, subjects_dir, mode=None, verbose=None):
 
 @verbose
 def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
-                       overwrite=False, subjects_dir=None, add_dist=None,
-                       verbose=None):
+                       overwrite=False, subjects_dir=None, add_dist=True,
+                       n_jobs=1, verbose=None):
     """Setup a source space with subsampling
 
     Parameters
@@ -841,8 +1216,10 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
         Path to SUBJECTS_DIR if it is not set in the environment.
     add_dist : bool
         Add distance and patch information to the source space. This takes some
-        time so precomputing it is recommended. The default is currently False
-        but will change to True in release 0.9.
+        time so precomputing it is recommended.
+    n_jobs : int
+        Number of jobs to run in parallel. Will use at most 2 jobs
+        (one for each hemisphere).
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -851,13 +1228,6 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
     src : list
         The source space for each hemisphere.
     """
-    if add_dist is None:
-        msg = ("The add_dist parameter to mne.setup_source_space currently "
-               "defaults to False, but the default will change to True in "
-               "release 0.9. Specify the parameter explicitly to avoid this "
-               "warning.")
-        logger.warning(msg)
-
     cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
            'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
            % (subject, fname, spacing, surface, overwrite,
@@ -930,7 +1300,7 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
 
     # pre-load ico/oct surf (once) for speed, if necessary
     if stype in ['ico', 'oct']:
-        ### from mne_ico_downsample.c ###
+        # ### from mne_ico_downsample.c ###
         if stype == 'ico':
             logger.info('Doing the icosahedral vertex picking...')
             ico_surf = _get_ico_surface(sval)
@@ -942,6 +1312,7 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
 
     for hemi, surf in zip(['lh', 'rh'], surfs):
         logger.info('Loading %s...' % surf)
+        # Setup the surface spacing in the MRI coord frame
         s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                                  subjects_dir)
         logger.info('loaded %s %d/%d selected to source space (%s)'
@@ -966,7 +1337,7 @@ def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
     src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
 
     if add_dist:
-        add_source_space_distances(src, verbose=verbose)
+        add_source_space_distances(src, n_jobs=n_jobs, verbose=verbose)
 
     # write out if requested, then return the data
     if fname is not None:
@@ -981,6 +1352,7 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
                               sphere=(0.0, 0.0, 0.0, 90.0), bem=None,
                               surface=None, mindist=5.0, exclude=0.0,
                               overwrite=False, subjects_dir=None,
+                              volume_label=None, add_interpolator=True,
                               verbose=None):
     """Setup a volume source space with grid spacing or discrete source space
 
@@ -1023,6 +1395,11 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
         If True, overwrite output file (if it exists).
     subjects_dir : string, or None
         Path to SUBJECTS_DIR if it is not set in the environment.
+    volume_label : str | None
+        Region of interest corresponding to an entry in the freesurfer
+        lookup table.
+    add_interpolator : bool
+        If True and ``mri`` is not None, then an interpolation matrix
+        will be produced.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -1035,11 +1412,16 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
 
     Notes
     -----
-    To create a discrete source space, `pos` must be a dict. To create a
-    volume source space, `pos` must be a float. Note that if a discrete
-    source space is created, then `mri` is optional (can be None), whereas
-    for a volume source space, `mri` must be provided.
+    To create a discrete source space, `pos` must be a dict, `mri` must be
+    None, and `volume_label` must be None. To create a whole brain volume
+    source space, `pos` must be a float and `mri` must be provided. To create
+    a volume source space from a label, `pos` must be a float, `volume_label`
+    must be provided, and `mri` must refer to a .mgh or .mgz file with values
+    corresponding to the freesurfer lookup-table (typically aseg.mgz).
     """
+
+    subjects_dir = get_subjects_dir(subjects_dir)
+
     if bem is not None and surface is not None:
         raise ValueError('Only one of "bem" and "surface" should be '
                          'specified')
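
A minimal usage sketch (illustrative only, not part of the commit) of the
modes described in the Notes above; the subject name and file paths are
hypothetical and a standard FreeSurfer layout is assumed:

    import mne

    subjects_dir = '/path/to/subjects'  # hypothetical SUBJECTS_DIR

    # Whole-brain volume source space on a 5 mm grid, bounded by the BEM
    # inner skull; passing mri enables the MRI interpolator:
    vol_src = mne.setup_volume_source_space(
        'sample', pos=5.0, mri='T1.mgz', bem='sample-5120-bem.fif',
        subjects_dir=subjects_dir)

    # Volume source space restricted to a single FreeSurfer structure,
    # using the volume_label parameter added in this commit:
    hippo_src = mne.setup_volume_source_space(
        'sample', pos=5.0, mri='aseg.mgz',
        volume_label='Left-Hippocampus', subjects_dir=subjects_dir)
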
@@ -1050,12 +1432,16 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
             raise ValueError('Cannot create interpolation matrix for '
                              'discrete source space, mri must be None if '
                              'pos is a dict')
-    elif not isinstance(pos, dict):
-        # "pos" will create a discrete src, so we don't need "mri"
-        # if "pos" is None, we must have "mri" b/c it will be vol src
-        raise RuntimeError('"mri" must be provided if "pos" is not a dict '
-                           '(i.e., if a volume instead of discrete source '
-                           'space is desired)')
+
+    if volume_label is not None:
+        if mri is None:
+            raise RuntimeError('"mri" must be provided if "volume_label" is '
+                               'not None')
+        # Check that volume label is found in .mgz file
+        volume_labels = get_volume_labels_from_aseg(mri)
+        if volume_label not in volume_labels:
+            raise ValueError('Volume %s not found in file %s. Double-check '
+                             'the FreeSurfer lookup table.'
+                             % (volume_label, mri))
 
     sphere = np.asarray(sphere)
     if sphere.size != 4:
@@ -1066,7 +1452,7 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
         logger.info('BEM file              : %s', bem)
     elif surface is not None:
         if isinstance(surface, dict):
-            if not all([key in surface for key in ['rr', 'tris']]):
+            if not all(key in surface for key in ['rr', 'tris']):
                 raise KeyError('surface, if dict, must have entries "rr" '
                                'and "tris"')
             # let's make sure we have geom info
@@ -1084,7 +1470,7 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
 
     # triage pos argument
     if isinstance(pos, dict):
-        if not all([key in pos for key in ['rr', 'nn']]):
+        if not all(key in pos for key in ['rr', 'nn']):
             raise KeyError('pos, if dict, must contain "rr" and "nn"')
         pos_extra = 'dict()'
     else:  # pos should be float-like
@@ -1102,12 +1488,12 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
     if isinstance(pos, float):
         logger.info('grid                  : %.1f mm' % pos)
         logger.info('mindist               : %.1f mm' % mindist)
-        pos /= 1000.0
+        pos /= 1000.0  # convert pos from mm to m
     if exclude > 0.0:
         logger.info('Exclude               : %.1f mm' % exclude)
     if mri is not None:
         logger.info('MRI volume            : %s' % mri)
-    exclude /= 1000.0
+    exclude /= 1000.0  # convert exclude from mm to m
     logger.info('')
 
     # Explicit list of points
@@ -1117,12 +1503,14 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
     else:
         # Load the brain surface as a template
         if bem is not None:
+            # read bem surface in the MRI coordinate frame
             surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
                                      verbose=False)
             logger.info('Loaded inner skull from %s (%d nodes)'
                         % (bem, surf['np']))
         elif surface is not None:
             if isinstance(surface, string_types):
+                # read the surface in the MRI coordinate frame
                 surf = _read_surface_geom(surface)
             else:
                 surf = surface
@@ -1135,16 +1523,24 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
             surf = _get_ico_surface(3)
 
             # Scale and shift
+
+            # center at origin and make radius 1
             _normalize_vectors(surf['rr'])
+
+            # normalize to sphere (in MRI coord frame)
             surf['rr'] *= sphere[3] / 1000.0  # scale by radius
             surf['rr'] += sphere[:3] / 1000.0  # move by center
             _complete_surface_info(surf, True)
-        # Make the grid of sources
-        sp = _make_volume_source_space(surf, pos, exclude, mindist)
+        # Make the grid of sources in MRI space
+        sp = _make_volume_source_space(surf, pos, exclude, mindist, mri,
+                                       volume_label)
 
-    # Compute an interpolation matrix to show data in an MRI volume
+    # Compute an interpolation matrix to show data in MRI_VOXEL coord frame
     if mri is not None:
-        _add_interpolator(sp, mri)
+        _add_interpolator(sp, mri, add_interpolator)
+    elif sp['type'] == 'vol':
+        # If there is no interpolator, it's actually a discrete source space
+        sp['type'] = 'discrete'
 
     if 'vol_dims' in sp:
         del sp['vol_dims']
@@ -1160,7 +1556,7 @@ def setup_volume_source_space(subject, fname=None, pos=5.0, mri=None,
 
 
 def _make_voxel_ras_trans(move, ras, voxel_size):
-    """Make a transformation for MRI voxel to MRI surface RAS"""
+    """Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)"""
     assert voxel_size.ndim == 1
     assert voxel_size.size == 3
     rot = ras.T * voxel_size[np.newaxis, :]
@@ -1168,8 +1564,7 @@ def _make_voxel_ras_trans(move, ras, voxel_size):
     assert rot.shape[0] == 3
     assert rot.shape[1] == 3
     trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
-    t = {'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL, 'to': FIFF.FIFFV_COORD_MRI,
-         'trans': trans}
+    t = Transform('mri_voxel', 'mri', trans)
     return t
 
 
@@ -1209,10 +1604,11 @@ def _make_discrete_source_space(pos):
     return sp
 
 
-def _make_volume_source_space(surf, grid, exclude, mindist):
+def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
+                              volume_label=None, do_neighbors=True, n_jobs=1):
     """Make a source space which covers the volume bounded by surf"""
 
-    # Figure out the grid size
+    # Figure out the grid size in the MRI coordinate frame
     mins = np.min(surf['rr'], axis=0)
     maxs = np.max(surf['rr'], axis=0)
     cm = np.mean(surf['rr'], axis=0)  # center of mass
@@ -1227,18 +1623,10 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     logger.info('Surface extent:')
     for c, mi, ma in zip('xyz', mins, maxs):
         logger.info('    %s = %6.1f ... %6.1f mm' % (c, 1000 * mi, 1000 * ma))
-    maxn = np.zeros(3, int)
-    minn = np.zeros(3, int)
-    for c in range(3):
-        if maxs[c] > 0:
-            maxn[c] = np.floor(np.abs(maxs[c]) / grid) + 1
-        else:
-            maxn[c] = -np.floor(np.abs(maxs[c]) / grid) - 1
-        if mins[c] > 0:
-            minn[c] = np.floor(np.abs(mins[c]) / grid) + 1
-        else:
-            minn[c] = -np.floor(np.abs(mins[c]) / grid) - 1
-
+    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
+                     np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
+    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
+                     np.floor(np.abs(m) / grid) - 1 for m in mins], int)
     logger.info('Grid extent:')
     for c, mi, ma in zip('xyz', minn, maxn):
         logger.info('    %s = %6.1f ... %6.1f mm'
@@ -1250,19 +1638,38 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     nrow = ns[0]
     ncol = ns[1]
     nplane = nrow * ncol
-    sp = dict(np=npts, rr=np.zeros((npts, 3)), nn=np.zeros((npts, 3)),
+    # x varies fastest, then y, then z (can use unravel to do this)
+    rr = meshgrid(np.arange(minn[2], maxn[2] + 1),
+                  np.arange(minn[1], maxn[1] + 1),
+                  np.arange(minn[0], maxn[0] + 1), indexing='ij')
+    x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
+    rr = np.array([x * grid, y * grid, z * grid]).T
+    sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
               inuse=np.ones(npts, int), type='vol', nuse=npts,
               coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
-    sp['nn'][:, 2] = 1.0  # Source orientation is immaterial
-
-    x = np.arange(minn[0], maxn[0] + 1)[np.newaxis, np.newaxis, :]
-    y = np.arange(minn[1], maxn[1] + 1)[np.newaxis, :, np.newaxis]
-    z = np.arange(minn[2], maxn[2] + 1)[:, np.newaxis, np.newaxis]
-    z = np.tile(z, (1, ns[1], ns[0])).ravel()
-    y = np.tile(y, (ns[2], 1, ns[0])).ravel()
-    x = np.tile(x, (ns[2], ns[1], 1)).ravel()
+    sp['nn'][:, 2] = 1.0
+    assert sp['rr'].shape[0] == npts
+
+    logger.info('%d sources before omitting any.', sp['nuse'])
+
+    # Exclude infeasible points
+    dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
+    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
+    sp['inuse'][bads] = False
+    sp['nuse'] -= len(bads)
+    logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
+
+    _filter_source_spaces(surf, mindist, None, [sp], n_jobs)
+    logger.info('%d sources remaining after excluding the sources outside '
+                'the surface and less than %6.1f mm inside.'
+                % (sp['nuse'], mindist))
+
+    if not do_neighbors:
+        if volume_label is not None:
+            raise RuntimeError('volume_label cannot be used unless '
+                               'do_neighbors is True')
+        return sp
     k = np.arange(npts)
-    sp['rr'] = np.c_[x, y, z] * grid
     neigh = np.empty((26, npts), int)
     neigh.fill(-1)
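
A small self-contained sketch (illustrative only) of the meshgrid/ravel
enumeration introduced above, confirming that x varies fastest:

    import numpy as np
    minn, maxn = np.array([0, 0, 0]), np.array([1, 2, 1])
    grid_pts = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
                           np.arange(minn[1], maxn[1] + 1),
                           np.arange(minn[0], maxn[0] + 1), indexing='ij')
    x, y, z = [g.ravel() for g in grid_pts[::-1]]
    print(np.array([x, y, z]).T[:4])  # [0 0 0], [1 0 0], [0 1 0], [1 1 0]
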
 
@@ -1335,19 +1742,51 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     idx3 = np.logical_and(idx2, x < maxn[0])
     neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
 
-    logger.info('%d sources before omitting any.', sp['nuse'])
-
-    # Exclude infeasible points
-    dists = np.sqrt(np.sum((sp['rr'] - cm) ** 2, axis=1))
-    bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
-    sp['inuse'][bads] = False
-    sp['nuse'] -= len(bads)
-    logger.info('%d sources after omitting infeasible sources.', sp['nuse'])
-
-    _filter_source_spaces(surf, mindist, None, [sp])
-    logger.info('%d sources remaining after excluding the sources outside '
-                'the surface and less than %6.1f mm inside.'
-                % (sp['nuse'], mindist))
+    # Restrict sources to volume of interest
+    if volume_label is not None:
+        try:
+            import nibabel as nib
+        except ImportError:
+            raise ImportError("nibabel is required to read segmentation file.")
+
+        logger.info('Selecting voxels from %s' % volume_label)
+
+        # Read the segmentation data using nibabel
+        mgz = nib.load(mri)
+        mgz_data = mgz.get_data()
+
+        # Get the numeric index for this volume label
+        lut = _get_lut()
+        vol_id = _get_lut_id(lut, volume_label, True)
+
+        # Get indices for this volume label in voxel space
+        vox_bool = mgz_data == vol_id
+
+        # Get the 3 dimensional indices in voxel space
+        vox_xyz = np.array(np.where(vox_bool)).T
+
+        # Transform to RAS coordinates
+        # (use tkr normalization or volume won't align with surface sources)
+        trans = _get_mgz_header(mri)['vox2ras_tkr']
+        # Convert transform from mm to m
+        trans[:3] /= 1000.
+        rr_voi = apply_trans(trans, vox_xyz)  # positions of VOI in RAS space
+        # Filter out points too far from volume region voxels
+        dists = _compute_nearest(rr_voi, sp['rr'], return_dists=True)[1]
+        # Maximum distance from the center of a voxel to any of its corners
+        maxdist = np.sqrt(((trans[:3, :3].sum(0) / 2.) ** 2).sum())
+        bads = np.where(dists > maxdist)[0]
+
+        # Update source info
+        sp['inuse'][bads] = False
+        sp['vertno'] = np.where(sp['inuse'] > 0)[0]
+        sp['nuse'] = len(sp['vertno'])
+        sp['seg_name'] = volume_label
+        sp['mri_file'] = mri
+
+        # Update log
+        logger.info('%d sources remaining after excluding sources too far '
+                    'from VOI voxels', sp['nuse'])
 
     # Omit unused vertices from the neighborhoods
     logger.info('Adjusting the neighborhood info...')
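
For reference, a hedged sketch of the voxel-to-surface-RAS (tkr) mapping the
block above relies on; nibabel is assumed available and the path is
hypothetical:

    import numpy as np
    import nibabel as nib

    mgz = nib.load('aseg.mgz')  # hypothetical path
    vox2ras_tkr = mgz.header.get_vox2ras_tkr()  # 4 x 4 affine, in mm
    ijk = np.array([128, 128, 128, 1.0])  # homogeneous voxel index
    xyz_mm = np.dot(vox2ras_tkr, ijk)[:3]  # tkr (surface) RAS, in mm
    xyz_m = xyz_mm / 1000.0  # MNE source spaces are stored in meters
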
@@ -1365,7 +1804,7 @@ def _make_volume_source_space(surf, grid, exclude, mindist):
     neigh.shape = old_shape
     neigh = neigh.T
     # Thought we would need this, but C code keeps -1 vertices, so we will:
-    #neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
+    # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
     sp['neighbor_vert'] = neigh
 
     # Set up the volume data (needed for creating the interpolation matrix)
@@ -1418,7 +1857,7 @@ def _get_mgz_header(fname):
     return header
 
 
-def _add_interpolator(s, mri_name):
+def _add_interpolator(s, mri_name, add_interpolator):
     """Compute a sparse matrix to interpolate the data into an MRI volume"""
     # extract transformation information from mri
     logger.info('Reading %s...' % mri_name)
@@ -1429,12 +1868,15 @@ def _add_interpolator(s, mri_name):
                   mri_depth=mri_depth))
     trans = header['vox2ras_tkr'].copy()
     trans[:3, :] /= 1000.0
-    s['vox_mri_t'] = {'trans': trans, 'from': FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
-                      'to': FIFF.FIFFV_COORD_MRI}  # ras_tkr
+    s['vox_mri_t'] = Transform('mri_voxel', 'mri', trans)  # ras_tkr
     trans = linalg.inv(np.dot(header['vox2ras_tkr'], header['ras2vox']))
     trans[:3, 3] /= 1000.0
-    s['mri_ras_t'] = {'trans': trans, 'from': FIFF.FIFFV_COORD_MRI,
-                      'to': FIFF.FIFFV_MNE_COORD_RAS}  # ras
+    s['mri_ras_t'] = Transform('mri', 'ras', trans)  # ras
+    s['mri_volume_name'] = mri_name
+    nvox = mri_width * mri_height * mri_depth
+    if not add_interpolator:
+        s['interpolator'] = sparse.csr_matrix((nvox, s['np']))
+        return
 
     _print_coord_trans(s['src_mri_t'], 'Source space : ')
     _print_coord_trans(s['vox_mri_t'], 'MRI volume : ')
@@ -1446,91 +1888,105 @@ def _add_interpolator(s, mri_name):
     #
     combo_trans = combine_transforms(s['vox_mri_t'],
                                      invert_transform(s['src_mri_t']),
-                                     FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
-                                     FIFF.FIFFV_MNE_COORD_MRI_VOXEL)
+                                     'mri_voxel', 'mri_voxel')
     combo_trans['trans'] = combo_trans['trans'].astype(np.float32)
 
     logger.info('Setting up interpolation...')
 
-    # Take *all* MRI vertices...
-    js = np.arange(mri_width, dtype=np.float32)
-    js = np.tile(js[np.newaxis, np.newaxis, :],
-                 (mri_depth, mri_height, 1)).ravel()
-    ks = np.arange(mri_height, dtype=np.float32)
-    ks = np.tile(ks[np.newaxis, :, np.newaxis],
-                 (mri_depth, 1, mri_width)).ravel()
-    ps = np.arange(mri_depth, dtype=np.float32)
-    ps = np.tile(ps[:, np.newaxis, np.newaxis],
-                 (1, mri_height, mri_width)).ravel()
-    r0 = np.c_[js, ks, ps]
-    # note we have the correct number of vertices
-    assert len(r0) == mri_width * mri_height * mri_depth
-
-    # ...and transform them from their MRI space into our source space's frame
-    # (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's really a subset
-    # of the entire volume!)
-    r0 = apply_trans(combo_trans['trans'], r0)
-    rn = np.floor(r0).astype(int)
-    maxs = (s['vol_dims'] - 1)[np.newaxis, :]
-    good = np.logical_and(np.all(rn >= 0, axis=1), np.all(rn < maxs, axis=1))
-    rn = rn[good]
-    r0 = r0[good]
-    # now we take each MRI voxel *in this space*, and figure out how to make
-    # its value the weighted sum of voxels in the volume source space. This
-    # is a 3D weighting scheme based (presumably) on the fact that we know
-    # we're interpolating from one volumetric grid into another.
-    jj = rn[:, 0]
-    kk = rn[:, 1]
-    pp = rn[:, 2]
-    vss = np.empty((8, len(jj)), int)
-    width = s['vol_dims'][0]
-    height = s['vol_dims'][1]
-    vss[0, :] = _vol_vertex(width, height, jj, kk, pp)
-    vss[1, :] = _vol_vertex(width, height, jj + 1, kk, pp)
-    vss[2, :] = _vol_vertex(width, height, jj + 1, kk + 1, pp)
-    vss[3, :] = _vol_vertex(width, height, jj, kk + 1, pp)
-    vss[4, :] = _vol_vertex(width, height, jj, kk, pp + 1)
-    vss[5, :] = _vol_vertex(width, height, jj + 1, kk, pp + 1)
-    vss[6, :] = _vol_vertex(width, height, jj + 1, kk + 1, pp + 1)
-    vss[7, :] = _vol_vertex(width, height, jj, kk + 1, pp + 1)
-    del jj, kk, pp
-    uses = np.any(s['inuse'][vss], axis=0)
-
-    verts = vss[:, uses].ravel()  # vertex (col) numbers in csr matrix
-    row_idx = np.tile(np.where(good)[0][uses], (8, 1)).ravel()
-
-    # figure out weights for each vertex
-    r0 = r0[uses]
-    rn = rn[uses]
-    xf = r0[:, 0] - rn[:, 0].astype(np.float32)
-    yf = r0[:, 1] - rn[:, 1].astype(np.float32)
-    zf = r0[:, 2] - rn[:, 2].astype(np.float32)
-    omxf = 1.0 - xf
-    omyf = 1.0 - yf
-    omzf = 1.0 - zf
-    weights = np.concatenate([omxf * omyf * omzf,  # correspond to rows of vss
+    # Loop over slices to save (lots of) memory
+    # Note that it is the slowest incrementing index
+    # This is equivalent to using mgrid and reshaping, but faster
+    data = []
+    indices = []
+    indptr = np.zeros(nvox + 1, np.int32)
+    for p in range(mri_depth):
+        js = np.arange(mri_width, dtype=np.float32)
+        js = np.tile(js[np.newaxis, :],
+                     (mri_height, 1)).ravel()
+        ks = np.arange(mri_height, dtype=np.float32)
+        ks = np.tile(ks[:, np.newaxis],
+                     (1, mri_width)).ravel()
+        ps = np.empty((mri_height, mri_width), np.float32).ravel()
+        ps.fill(p)
+        r0 = np.c_[js, ks, ps]
+        del js, ks, ps
+
+        # Transform our vertices from their MRI space into our source space's
+        # frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
+        # really a subset of the entire volume!)
+        r0 = apply_trans(combo_trans['trans'], r0)
+        rn = np.floor(r0).astype(int)
+        maxs = (s['vol_dims'] - 1)[np.newaxis, :]
+        good = np.where(np.logical_and(np.all(rn >= 0, axis=1),
+                                       np.all(rn < maxs, axis=1)))[0]
+        rn = rn[good]
+        r0 = r0[good]
+
+        # now we take each MRI voxel *in this space*, and figure out how
+        # to make its value the weighted sum of voxels in the volume source
+        # space. This is a 3D weighting scheme based (presumably) on the
+        # fact that we know we're interpolating from one volumetric grid
+        # into another.
+        jj = rn[:, 0]
+        kk = rn[:, 1]
+        pp = rn[:, 2]
+        vss = np.empty((len(jj), 8), np.int32)
+        width = s['vol_dims'][0]
+        height = s['vol_dims'][1]
+        jjp1 = jj + 1
+        kkp1 = kk + 1
+        ppp1 = pp + 1
+        vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
+        vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
+        vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
+        vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
+        vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
+        vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
+        vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
+        vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
+        del jj, kk, pp, jjp1, kkp1, ppp1
+        uses = np.any(s['inuse'][vss], axis=1)
+        if uses.size == 0:
+            continue
+        vss = vss[uses].ravel()  # vertex (col) numbers in csr matrix
+        indices.append(vss)
+        indptr[good[uses] + p * mri_height * mri_width + 1] = 8
+        del vss
+
+        # figure out weights for each vertex
+        r0 = r0[uses]
+        rn = rn[uses]
+        del uses, good
+        xf = r0[:, 0] - rn[:, 0].astype(np.float32)
+        yf = r0[:, 1] - rn[:, 1].astype(np.float32)
+        zf = r0[:, 2] - rn[:, 2].astype(np.float32)
+        omxf = 1.0 - xf
+        omyf = 1.0 - yf
+        omzf = 1.0 - zf
+        # each entry in the concatenation corresponds to a row of vss
+        data.append(np.array([omxf * omyf * omzf,
                               xf * omyf * omzf,
                               xf * yf * omzf,
                               omxf * yf * omzf,
                               omxf * omyf * zf,
                               xf * omyf * zf,
                               xf * yf * zf,
-                              omxf * yf * zf])
-    del xf, yf, zf, omxf, omyf, omzf
+                              omxf * yf * zf], order='F').T.ravel())
+        del xf, yf, zf, omxf, omyf, omzf
 
-    # Compose the sparse matrix
-    ij = (row_idx, verts)
-    nvox = mri_width * mri_height * mri_depth
-    interp = sparse.csr_matrix((weights, ij), shape=(nvox, s['np']))
-    s['interpolator'] = interp
-    s['mri_volume_name'] = mri_name
-    logger.info(' %d/%d nonzero values [done]' % (len(weights), nvox))
+    # Compose the sparse matrix
+    indptr = np.cumsum(indptr, out=indptr)
+    indices = np.concatenate(indices)
+    data = np.concatenate(data)
+    s['interpolator'] = sparse.csr_matrix((data, indices, indptr),
+                                          shape=(nvox, s['np']))
+    logger.info(' %d/%d nonzero values [done]' % (len(data), nvox))
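
The eight weights stacked into `data` above are ordinary trilinear
interpolation coefficients; a self-contained check (illustrative sketch):

    import numpy as np
    r0 = np.array([2.3, 5.6, 7.9])  # voxel center in source-grid coords
    rn = np.floor(r0)               # lower corner of the enclosing cell
    xf, yf, zf = r0 - rn
    w = np.array([(1 - xf) * (1 - yf) * (1 - zf), xf * (1 - yf) * (1 - zf),
                  xf * yf * (1 - zf), (1 - xf) * yf * (1 - zf),
                  (1 - xf) * (1 - yf) * zf, xf * (1 - yf) * zf,
                  xf * yf * zf, (1 - xf) * yf * zf])
    assert np.isclose(w.sum(), 1.0)  # the 8 corner weights sum to one
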
 
 
 @verbose
 def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
                           verbose=None):
-    """Remove all source space points closer than a given limit"""
+    """Remove all source space points closer than a given limit (in mm)"""
     if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
         raise RuntimeError('Source spaces are in head coordinates and no '
                            'coordinate transform was provided!')
@@ -1559,8 +2015,7 @@ def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
             r1s = apply_trans(inv_trans['trans'], r1s)
 
         # Check that the source is inside surface (often the inner skull)
-        x = _sum_solids_div(r1s, surf, n_jobs)
-        outside = np.abs(x - 1.0) > 1e-5
+        outside = _points_outside_surface(r1s, surf, n_jobs)
         omit_outside = np.sum(outside)
 
         # vectorized nearest using BallTree (or cdist)
@@ -1589,35 +2044,95 @@ def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
     logger.info('Thank you for waiting.')
 
 
-def _sum_solids_div(fros, surf, n_jobs):
-    """Compute sum of solid angles according to van Oosterom for all tris"""
+@verbose
+def _points_outside_surface(rr, surf, n_jobs=1, verbose=None):
+    """Check whether points are outside a surface
+
+    Parameters
+    ----------
+    rr : ndarray
+        Nx3 array of points to check.
+    surf : dict
+        Surface with entries "rr" and "tris".
+
+    Returns
+    -------
+    outside : ndarray
+        Boolean array of length N, True for points outside the surface.
+    """
+    rr = np.atleast_2d(rr)
+    assert rr.shape[1] == 3
     parallel, p_fun, _ = parallel_func(_get_solids, n_jobs)
-    tot_angles = parallel(p_fun(surf['rr'][tris], fros)
+    tot_angles = parallel(p_fun(surf['rr'][tris], rr)
                           for tris in np.array_split(surf['tris'], n_jobs))
-    return np.sum(tot_angles, axis=0) / (2 * np.pi)
+    return np.abs(np.sum(tot_angles, axis=0) / (2 * np.pi) - 1.0) > 1e-5
 
 
 def _get_solids(tri_rrs, fros):
     """Helper for computing _sum_solids_div total angle in chunks"""
     # NOTE: This incorporates the division by 4PI that used to be separate
+    # for tri_rr in tri_rrs:
+    #     v1 = fros - tri_rr[0]
+    #     v2 = fros - tri_rr[1]
+    #     v3 = fros - tri_rr[2]
+    #     triple = np.sum(fast_cross_3d(v1, v2) * v3, axis=1)
+    #     l1 = np.sqrt(np.sum(v1 * v1, axis=1))
+    #     l2 = np.sqrt(np.sum(v2 * v2, axis=1))
+    #     l3 = np.sqrt(np.sum(v3 * v3, axis=1))
+    #     s = (l1 * l2 * l3 +
+    #          np.sum(v1 * v2, axis=1) * l3 +
+    #          np.sum(v1 * v3, axis=1) * l2 +
+    #          np.sum(v2 * v3, axis=1) * l1)
+    #     tot_angle -= np.arctan2(triple, s)
+
+    # This is the vectorized version, but with a slicing heuristic to
+    # prevent memory explosion
     tot_angle = np.zeros((len(fros)))
-    for tri_rr in tri_rrs:
-        v1 = fros - tri_rr[0]
-        v2 = fros - tri_rr[1]
-        v3 = fros - tri_rr[2]
-        triple = np.sum(fast_cross_3d(v1, v2) * v3, axis=1)
-        l1 = np.sqrt(np.sum(v1 * v1, axis=1))
-        l2 = np.sqrt(np.sum(v2 * v2, axis=1))
-        l3 = np.sqrt(np.sum(v3 * v3, axis=1))
-        s = (l1 * l2 * l3 +
-             np.sum(v1 * v2, axis=1) * l3 +
-             np.sum(v1 * v3, axis=1) * l2 +
-             np.sum(v2 * v3, axis=1) * l1)
-        tot_angle -= np.arctan2(triple, s)
+    slices = np.r_[np.arange(0, len(fros), 100), [len(fros)]]
+    for i1, i2 in zip(slices[:-1], slices[1:]):
+        v1 = fros[i1:i2] - tri_rrs[:, 0, :][:, np.newaxis]
+        v2 = fros[i1:i2] - tri_rrs[:, 1, :][:, np.newaxis]
+        v3 = fros[i1:i2] - tri_rrs[:, 2, :][:, np.newaxis]
+        triples = _fast_cross_nd_sum(v1, v2, v3)
+        l1 = np.sqrt(np.sum(v1 * v1, axis=2))
+        l2 = np.sqrt(np.sum(v2 * v2, axis=2))
+        l3 = np.sqrt(np.sum(v3 * v3, axis=2))
+        ss = (l1 * l2 * l3 +
+              np.sum(v1 * v2, axis=2) * l3 +
+              np.sum(v1 * v3, axis=2) * l2 +
+              np.sum(v2 * v3, axis=2) * l1)
+        tot_angle[i1:i2] = -np.sum(np.arctan2(triples, ss), axis=0)
     return tot_angle
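
The criterion in _points_outside_surface rests on the van Oosterom &
Strackee (1983) formula used above: for a closed, outward-oriented
triangulation, the solid angles sum to 4*pi seen from an interior point and
to 0 from an exterior one. A self-contained sketch on an octahedron
(illustrative only):

    import numpy as np

    def total_solid_angle(tri_rrs, fros):
        tot = np.zeros(len(fros))
        for tri in tri_rrs:
            # vectors from each query point to the triangle vertices
            v1, v2, v3 = tri[0] - fros, tri[1] - fros, tri[2] - fros
            triple = np.sum(np.cross(v1, v2) * v3, axis=1)
            l1, l2, l3 = [np.linalg.norm(v, axis=1) for v in (v1, v2, v3)]
            s = (l1 * l2 * l3 + np.sum(v1 * v2, axis=1) * l3 +
                 np.sum(v1 * v3, axis=1) * l2 + np.sum(v2 * v3, axis=1) * l1)
            tot += 2 * np.arctan2(triple, s)
        return tot

    rr = np.array([[1, 0, 0], [-1, 0, 0], [0, 1, 0],
                   [0, -1, 0], [0, 0, 1], [0, 0, -1]], float)
    tris = np.array([[0, 2, 4], [2, 1, 4], [1, 3, 4], [3, 0, 4],
                     [2, 0, 5], [1, 2, 5], [3, 1, 5], [0, 3, 5]])
    pts = np.array([[0., 0., 0.], [2., 2., 2.]])  # inside, then outside
    print(total_solid_angle(rr[tris], pts) / (4 * np.pi))  # ~[1. 0.]
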
 
 
 @verbose
+def _ensure_src(src, verbose=None):
+    """Helper to ensure we have a source space"""
+    if isinstance(src, string_types):
+        if not op.isfile(src):
+            raise IOError('Source space file "%s" not found' % src)
+        logger.info('Reading %s...' % src)
+        src = read_source_spaces(src, verbose=False)
+    if not isinstance(src, SourceSpaces):
+        raise ValueError('src must be a string or instance of SourceSpaces')
+    return src
+
+
+def _ensure_src_subject(src, subject):
+    src_subject = src[0].get('subject_his_id', None)
+    if subject is None:
+        subject = src_subject
+        if subject is None:
+            raise ValueError('source space is too old, subject must be '
+                             'provided')
+    elif src_subject is not None and subject != src_subject:
+        raise ValueError('Mismatch between provided subject "%s" and subject '
+                         'name "%s" in the source space'
+                         % (subject, src_subject))
+    return subject
+
+
+@verbose
 def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
     """Compute inter-source distances along the cortical surface
 
@@ -1661,15 +2176,14 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
     stored along with the source space data for future use.
     """
     n_jobs = check_n_jobs(n_jobs)
-    if not isinstance(src, SourceSpaces):
-        raise ValueError('"src" must be an instance of SourceSpaces')
+    src = _ensure_src(src)
     if not np.isscalar(dist_limit):
-        raise ValueError('limit must be a scalar')
-    if not check_scipy_version('0.11'):
+        raise ValueError('dist_limit must be a scalar, got %s'
+                         % repr(dist_limit))
+    if not check_version('scipy', '0.11'):
         raise RuntimeError('scipy >= 0.11 must be installed (or > 0.13 '
                            'if dist_limit < np.inf')
 
-    if not all([s['type'] == 'surf' for s in src]):
+    if not all(s['type'] == 'surf' for s in src):
         raise RuntimeError('Currently all source spaces must be of surface '
                            'type')
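
A minimal usage sketch; since this change a filename is also accepted (via
_ensure_src). The path is hypothetical, and dist_limit is in meters:

    import mne
    src = mne.add_source_space_distances('sample-oct-6-src.fif',
                                         dist_limit=0.07, n_jobs=2)
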
 
@@ -1703,19 +2217,19 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
         min_dists.append(min_dist)
         min_idxs.append(min_idx)
         # now actually deal with distances, convert to sparse representation
-        d = np.concatenate([dd[0] for dd in d], axis=0)
-        i, j = np.meshgrid(s['vertno'], s['vertno'])
-        d = d.ravel()
-        i = i.ravel()
-        j = j.ravel()
+        d = np.concatenate([dd[0] for dd in d]).ravel()  # already float32
         idx = d > 0
-        d = sparse.csr_matrix((d[idx], (i[idx], j[idx])),
+        d = d[idx]
+        i, j = np.meshgrid(s['vertno'], s['vertno'])
+        i = i.ravel()[idx]
+        j = j.ravel()[idx]
+        d = sparse.csr_matrix((d, (i, j)),
                               shape=(s['np'], s['np']), dtype=np.float32)
         s['dist'] = d
         s['dist_limit'] = np.array([dist_limit], np.float32)
 
     # Let's see if our distance was sufficient to allow for patch info
-    if not any([np.any(np.isinf(md)) for md in min_dists]):
+    if not any(np.any(np.isinf(md)) for md in min_dists):
         # Patch info can be added!
         for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
             s['nearest'] = min_idx
@@ -1732,10 +2246,11 @@ def _do_src_distances(con, vertno, run_inds, limit):
         func = partial(sparse.csgraph.dijkstra, limit=limit)
     else:
         func = sparse.csgraph.dijkstra
-    chunk_size = 100  # save memory by chunking (only a little slower)
+    chunk_size = 20  # save memory by chunking (only a little slower)
     lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
     n_chunks = len(lims) - 1
-    d = np.empty((len(run_inds), len(vertno)))
+    # eventually we want this in float32, so save memory by only storing 32-bit
+    d = np.empty((len(run_inds), len(vertno)), np.float32)
     min_dist = np.empty((n_chunks, con.shape[0]))
     min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
     range_idx = np.arange(con.shape[0])
@@ -1751,3 +2266,319 @@ def _do_src_distances(con, vertno, run_inds, limit):
     min_idx = min_idx[midx, range_idx]
     d[d == np.inf] = 0  # scipy will give us np.inf for uncalc. distances
     return d, min_idx, min_dist
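
A self-contained toy version of the chunked-Dijkstra pattern above
(illustrative only; scipy assumed available):

    import numpy as np
    from scipy import sparse
    from scipy.sparse.csgraph import dijkstra

    con = sparse.csr_matrix([[0., 1., 0., 0.], [1., 0., 2., 0.],
                             [0., 2., 0., 3.], [0., 0., 3., 0.]])
    run_inds = np.arange(4)
    chunk_size = 2  # small chunks bound peak memory, as above
    d = np.empty((len(run_inds), 4), np.float32)
    for i0 in range(0, len(run_inds), chunk_size):
        idx = run_inds[i0:i0 + chunk_size]
        d[i0:i0 + chunk_size] = dijkstra(con, indices=idx)
    print(d)  # pairwise graph distances, computed chunk by chunk
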
+
+
+def get_volume_labels_from_aseg(mgz_fname):
+    """Returns a list of names of segmented volumes.
+
+    Parameters
+    ----------
+    mgz_fname : str
+        Filename to read. Typically aseg.mgz or some variant in the FreeSurfer
+        pipeline.
+
+    Returns
+    -------
+    label_names : list of str
+        The names of segmented volumes included in this mgz file.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    import nibabel as nib
+
+    # Read the mgz file using nibabel
+    mgz_data = nib.load(mgz_fname).get_data()
+
+    # Get the unique label names
+    lut = _get_lut()
+    label_names = [lut[lut['id'] == ii]['name'][0].decode('utf-8')
+                   for ii in np.unique(mgz_data)]
+    label_names = sorted(label_names, key=lambda n: n.lower())
+    return label_names
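
Usage sketch (path hypothetical): list the structures available in an aseg
file before choosing a volume_label:

    import mne
    labels = mne.get_volume_labels_from_aseg('aseg.mgz')
    print(labels[:3])  # e.g. ['Brain-Stem', 'CC_Anterior', 'CC_Central']
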
+
+
+def _get_hemi(s):
+    """Helper to get a hemisphere from a given source space"""
+    if s['type'] != 'surf':
+        raise RuntimeError('Only surface source spaces supported')
+    if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
+        return 'lh', 0, s['id']
+    elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
+        return 'rh', 1, s['id']
+    else:
+        raise ValueError('unknown surface ID %s' % s['id'])
+
+
+def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
+                       to_neighbor_tri=None):
+    """Helper to get a nearest-neigbor vertex match for a given hemi src
+
+    The to_neighbor_tri can optionally be passed in to avoid recomputation
+    if it's already available.
+    """
+    # adapted from mne_make_source_space.c, knowing accurate=False (i.e.
+    # nearest-neighbor mode should be used)
+    logger.info('Mapping %s %s -> %s (nearest neighbor)...'
+                % (hemi, subject_from, subject_to))
+    regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
+            for s in (subject_from, subject_to)]
+    reg_fro, reg_to = [_read_surface_geom(r, patch_stats=False) for r in regs]
+    if to_neighbor_tri is None:
+        to_neighbor_tri = _triangle_neighbors(reg_to['tris'], reg_to['np'])
+    morph_inuse = np.zeros(len(reg_to['rr']), bool)
+    best = np.zeros(fro_src['np'], int)
+    ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
+    for v, one in zip(fro_src['vertno'], ones):
+        # if it were actually a proper morph map, we would do this, but since
+        # we know it's nearest neighbor list, we don't need to:
+        # this_mm = mm[v]
+        # one = this_mm.indices[this_mm.data.argmax()]
+        if morph_inuse[one]:
+            # Try the nearest neighbors
+            neigh = _get_surf_neighbors(reg_to, one)  # on demand calc
+            was = one
+            one = neigh[np.where(~morph_inuse[neigh])[0]]
+            if len(one) == 0:
+                raise RuntimeError('vertex %d would be used multiple times.'
+                                   % was)
+            one = one[0]
+            logger.info('Source space vertex moved from %d to %d because of '
+                        'double occupation.' % (was, one))
+        best[v] = one
+        morph_inuse[one] = True
+    return best
+
+
+@verbose
+def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
+                        subjects_dir=None, verbose=None):
+    """Morph an existing source space to a different subject
+
+    .. warning:: This can be used in place of morphing source estimates for
+                 multiple subjects, but there may be consequences in terms
+                 of dipole topology.
+
+    Parameters
+    ----------
+    src_from : instance of SourceSpaces
+        Surface source spaces to morph.
+    subject_to : str
+        The destination subject.
+    surf : str
+        The brain surface to use for the new source space.
+    subject_from : str | None
+        The "from" subject. For most source spaces this shouldn't need
+        to be provided, since it is stored in the source space itself.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    src : instance of SourceSpaces
+        The morphed source spaces.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+    """
+    # adapted from mne_make_source_space.c
+    src_from = _ensure_src(src_from)
+    subject_from = _ensure_src_subject(src_from, subject_from)
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    src_out = list()
+    for fro in src_from:
+        hemi, idx, id_ = _get_hemi(fro)
+        to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
+        logger.info('Reading destination surface %s' % (to,))
+        to = _read_surface_geom(to, patch_stats=False, verbose=False)
+        _complete_surface_info(to)
+        # Now we morph the vertices to the destination
+        # The C code does something like this, but with a nearest-neighbor
+        # mapping instead of the weighted one::
+        #
+        #     >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
+        #
+        # Here we use a direct NN calculation, since picking the max from the
+        # existing morph map (which naively one might expect to be equivalent)
+        # differs for ~3% of vertices.
+        best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
+                                  subjects_dir, to['neighbor_tri'])
+        for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
+                    'use_tris'):
+            del to[key]
+        to['vertno'] = np.sort(best[fro['vertno']])
+        to['inuse'] = np.zeros(len(to['rr']), int)
+        to['inuse'][to['vertno']] = True
+        to['use_tris'] = best[fro['use_tris']]
+        to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
+                  nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
+                  dist=None, id=id_, dist_limit=None, type='surf',
+                  coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
+                  rr=to['rr'] / 1000.)
+        src_out.append(to)
+        logger.info('[done]\n')
+    info = dict(working_dir=os.getcwd(),
+                command_line=_get_call_line(in_verbose=True))
+    return SourceSpaces(src_out, info=info)
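
A minimal usage sketch for the new function; subject names and SUBJECTS_DIR
are hypothetical:

    import mne
    subjects_dir = '/path/to/subjects'
    src_fs = mne.setup_source_space('fsaverage', fname=None, spacing='oct6',
                                    add_dist=False, subjects_dir=subjects_dir)
    src_sample = mne.morph_source_spaces(src_fs, subject_to='sample',
                                         subjects_dir=subjects_dir)
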
+
+
+@verbose
+def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
+                              subjects_dir=None, verbose=None):
+    """Get the reordering indices for a morphed source space
+
+    Parameters
+    ----------
+    vertices : list
+        The vertices for the left and right hemispheres.
+    src_from : instance of SourceSpaces
+        The original source space.
+    subject_from : str
+        The source subject.
+    subject_to : str
+        The destination subject.
+    subjects_dir : string, or None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    data_idx : ndarray, shape (n_vertices,)
+        The array used to reshape the data.
+    from_vertices : list
+        The right and left hemisphere vertex numbers for the "from" subject.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    from_vertices = list()
+    data_idxs = list()
+    offset = 0
+    for ii, hemi in enumerate(('lh', 'rh')):
+        # Get the mapping from the original source space to the destination
+        # subject's surface vertex numbers
+        best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
+                                  hemi, subjects_dir)
+        full_mapping = best[src_from[ii]['vertno']]
+        # Tragically, we might not have all of our vertno left (e.g. because
+        # some are omitted during fwd calc), so we must do some indexing magic:
+
+        # From all vertices, a subset could be chosen by fwd calc:
+        used_vertices = in1d(full_mapping, vertices[ii])
+        from_vertices.append(src_from[ii]['vertno'][used_vertices])
+        remaining_mapping = full_mapping[used_vertices]
+        if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
+                not in1d(vertices[ii], full_mapping).all():
+            raise RuntimeError('Could not map vertices, perhaps the wrong '
+                               'subject "%s" was provided?' % subject_from)
+
+        # And our data have been implicitly remapped by the forced ascending
+        # vertno order in source spaces
+        implicit_mapping = np.argsort(remaining_mapping)  # happens to data
+        data_idx = np.argsort(implicit_mapping)  # to reverse the mapping
+        data_idx += offset  # hemisphere offset
+        data_idxs.append(data_idx)
+        offset += len(implicit_mapping)
+    data_idx = np.concatenate(data_idxs)
+    # this one is really just a sanity check for us, should never be violated
+    # by users
+    assert np.array_equal(np.sort(data_idx),
+                          np.arange(sum(len(v) for v in vertices)))
+    return data_idx, from_vertices
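
The double argsort above is the standard trick for inverting a sort
permutation; a tiny self-contained check:

    import numpy as np
    mapping = np.array([30, 10, 20])
    implicit = np.argsort(mapping)   # the reorder the data implicitly get
    data_idx = np.argsort(implicit)  # indices that undo that reorder
    assert np.array_equal(implicit[data_idx], np.arange(3))
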
+
+
+def _compare_source_spaces(src0, src1, mode='exact', dist_tol=1.5e-3):
+    """Compare two source spaces
+
+    Note: this function is also used by forward/tests/test_make_forward.py
+    """
+    from nose.tools import assert_equal, assert_true
+    from numpy.testing import assert_allclose, assert_array_equal
+    from scipy.spatial.distance import cdist
+    if mode != 'exact' and 'approx' not in mode:  # 'nointerp' can be appended
+        raise RuntimeError('unknown mode %s' % mode)
+
+    for s0, s1 in zip(src0, src1):
+        # first check the keys
+        a, b = set(s0.keys()), set(s1.keys())
+        assert_equal(a, b, str(a ^ b))
+        for name in ['nuse', 'ntri', 'np', 'type', 'id']:
+            assert_equal(s0[name], s1[name], name)
+        for name in ['subject_his_id']:
+            if name in s0 or name in s1:
+                assert_equal(s0[name], s1[name], name)
+        for name in ['interpolator']:
+            if name in s0 or name in s1:
+                diffs = (s0['interpolator'] - s1['interpolator']).data
+                if len(diffs) > 0 and 'nointerp' not in mode:
+                    # RMS difference must be under 10%
+                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.10, name)
+        for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
+            if s0[name] is None:
+                assert_true(s1[name] is None, name)
+            else:
+                if mode == 'exact':
+                    assert_array_equal(s0[name], s1[name], name)
+                else:  # 'approx' in mode
+                    atol = 1e-3 if name == 'nn' else 1e-4
+                    assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
+                                    err_msg=name)
+        for name in ['seg_name']:
+            if name in s0 or name in s1:
+                assert_equal(s0[name], s1[name], name)
+        if mode == 'exact':
+            for name in ['inuse', 'vertno', 'use_tris']:
+                assert_array_equal(s0[name], s1[name], err_msg=name)
+            # these fields will exist if patch info was added, these are
+            # not tested in mode == 'approx'
+            for name in ['nearest', 'nearest_dist']:
+                if s0[name] is None:
+                    assert_true(s1[name] is None, name)
+                else:
+                    assert_array_equal(s0[name], s1[name])
+            for name in ['dist_limit']:
+                assert_true(s0[name] == s1[name], name)
+            for name in ['dist']:
+                if s0[name] is not None:
+                    assert_equal(s1[name].shape, s0[name].shape)
+                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
+            for name in ['pinfo']:
+                if s0[name] is not None:
+                    assert_true(len(s0[name]) == len(s1[name]))
+                    for p1, p2 in zip(s0[name], s1[name]):
+                        assert_true(all(p1 == p2))
+        else:  # 'approx' in mode:
+            # deal with vertno, inuse, and use_tris carefully
+            assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0],
+                               'left hemisphere vertices')
+            assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0],
+                               'right hemisphere vertices')
+            assert_equal(len(s0['vertno']), len(s1['vertno']))
+            agreement = np.mean(s0['inuse'] == s1['inuse'])
+            assert_true(agreement >= 0.99, "%s < 0.99" % agreement)
+            if agreement < 1.0:
+                # make sure mismatched vertno are within 1.5mm
+                v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
+                v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
+                dists = cdist(s0['rr'][v0], s1['rr'][v1])
+                assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
+                                atol=dist_tol, err_msg='mismatched vertno')
+            if s0['use_tris'] is not None:  # for "spacing"
+                assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
+            else:
+                assert_true(s1['use_tris'] is None)
+            assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
+    # The above "if s0[name] is not None" can be removed once the sample
+    # dataset is updated to have a source space with distance info
+    for name in ['working_dir', 'command_line']:
+        if mode == 'exact':
+            assert_equal(src0.info[name], src1.info[name])
+        else:  # 'approx' in mode:
+            if name in src0.info:
+                assert_true(name in src1.info, '"%s" missing' % name)
+            else:
+                assert_true(name not in src1.info,
+                            '"%s" should not exist' % name)
diff --git a/mne/stats/__init__.py b/mne/stats/__init__.py
index 31381f3..b45141e 100644
--- a/mne/stats/__init__.py
+++ b/mne/stats/__init__.py
@@ -1,6 +1,7 @@
 """Functions for statistical analysis"""
 
-from .parametric import f_threshold_twoway_rm, f_twoway_rm
+from .parametric import (
+    f_threshold_twoway_rm, f_threshold_mway_rm, f_twoway_rm, f_mway_rm)
 from .permutations import permutation_t_test
 from .cluster_level import (permutation_cluster_test,
                             permutation_cluster_1samp_test,
@@ -10,4 +11,4 @@ from .cluster_level import (permutation_cluster_test,
                             ttest_1samp_no_p,
                             summarize_clusters_stc)
 from .multi_comp import fdr_correction, bonferroni_correction
-from .regression import linear_regression
+from .regression import linear_regression, linear_regression_raw
diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py
index 8fbc3a9..d0b1ec6 100755
--- a/mne/stats/cluster_level.py
+++ b/mne/stats/cluster_level.py
@@ -10,12 +10,13 @@
 # License: Simplified BSD
 
 import numpy as np
-from scipy import stats, sparse, ndimage
 import warnings
+import logging
+from scipy import sparse
 
 from .parametric import f_oneway
 from ..parallel import parallel_func, check_n_jobs
-from ..utils import split_list, logger, verbose
+from ..utils import split_list, logger, verbose, ProgressBar
 from ..fixes import in1d, unravel_index
 from ..source_estimate import SourceEstimate
 
@@ -42,7 +43,7 @@ def _get_clusters_spatial(s, neighbors):
             # look across other vertices
             buddies = np.where(r)[0]
             buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
-                                      assume_unique=True)]
+                                   assume_unique=True)]
             t_inds += buddies.tolist()
             r[buddies] = False
             icount += 1
@@ -152,7 +153,7 @@ def _get_clusters_st_multistep(keepers, neighbors, max_step=1):
                 buddies = inds[t_border[t[ind]]:t_border[t[ind] + 1]]
                 buddies = buddies[r[buddies]]
                 buddies = buddies[in1d(s[buddies], neighbors[s[ind]],
-                                          assume_unique=True)]
+                                       assume_unique=True)]
                 buddies = np.concatenate((selves, buddies))
                 t_inds += buddies.tolist()
                 r[buddies] = False
@@ -184,8 +185,8 @@ def _get_clusters_st(x_in, neighbors, max_step=1):
             order = np.argsort(row)
             row = row[order]
             col = col[order]
-            lims = [0] + (np.where(np.diff(row) > 0)[0]
-                          + 1).tolist() + [len(row)]
+            lims = [0] + (np.where(np.diff(row) > 0)[0] +
+                          1).tolist() + [len(row)]
 
         for start, end in zip(lims[:-1], lims[1:]):
             keepers[row[start]] = np.sort(col[start:end])
@@ -228,14 +229,14 @@ def _get_components(x_in, connectivity, return_list=True):
     connectivity = sparse.coo_matrix((data, (row, col)), shape=shape)
     _, components = cs_graph_components(connectivity)
     if return_list:
-        labels = np.unique(components)
-        clusters = list()
-        for l in labels:
-            c = np.where(components == l)[0]
-            if np.any(x_in[c]):
-                clusters.append(c)
-        # logger.info("-- number of components : %d"
-        #             % np.unique(components).size)
+        start = np.min(components)
+        stop = np.max(components)
+        comp_list = [list() for i in range(start, stop + 1, 1)]
+        mask = np.zeros(len(comp_list), dtype=bool)
+        for ii, comp in enumerate(components):
+            comp_list[comp].append(ii)
+            mask[comp] += x_in[ii]
+        clusters = [np.array(k) for k, m in zip(comp_list, mask) if m]
         return clusters
     else:
         return components
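
A self-contained sketch of the bucketing rewrite above: indices are grouped
by component label, and only components containing at least one active
(x_in) point are kept:

    import numpy as np
    components = np.array([0, 0, 1, 2, 2])
    x_in = np.array([True, False, False, False, True])
    comp_list = [list() for _ in range(components.max() + 1)]
    mask = np.zeros(len(comp_list), dtype=bool)
    for ii, comp in enumerate(components):
        comp_list[comp].append(ii)
        mask[comp] |= x_in[ii]
    clusters = [np.array(k) for k, m in zip(comp_list, mask) if m]
    # -> [array([0, 1]), array([3, 4])]
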
@@ -292,7 +293,8 @@ def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
     sums: array
         Sum of x values in clusters.
     """
-    if not tail in [-1, 0, 1]:
+    from scipy import ndimage
+    if tail not in [-1, 0, 1]:
         raise ValueError('invalid tail parameter')
 
     x = np.asanyarray(x)
@@ -301,7 +303,7 @@ def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
         if not isinstance(threshold, dict):
             raise TypeError('threshold must be a number, or a dict for '
                             'threshold-free cluster enhancement')
-        if not all([key in threshold for key in ['start', 'step']]):
+        if not all(key in threshold for key in ['start', 'step']):
             raise KeyError('threshold, if dict, must have at least '
                            '"start" and "step"')
         tfce = True
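
For reference, the dict form checked above enables threshold-free cluster
enhancement (TFCE); an illustrative configuration:

    threshold = dict(start=0.2, step=0.2)  # integrate clusters from 0.2 up
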
@@ -364,7 +366,8 @@ def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
         for x_in in x_ins:
             if np.any(x_in):
                 out = _find_clusters_1dir_parts(x, x_in, connectivity,
-                                                max_step, partitions, t_power)
+                                                max_step, partitions, t_power,
+                                                ndimage)
                 clusters += out[0]
                 sums = np.concatenate((sums, out[1]))
         if tfce is True:
@@ -404,26 +407,27 @@ def _find_clusters(x, threshold, tail=0, connectivity=None, max_step=1,
 
 
 def _find_clusters_1dir_parts(x, x_in, connectivity, max_step, partitions,
-                              t_power):
+                              t_power, ndimage):
     """Deal with partitions, and pass the work to _find_clusters_1dir
     """
     if partitions is None:
         clusters, sums = _find_clusters_1dir(x, x_in, connectivity, max_step,
-                                             t_power)
+                                             t_power, ndimage)
     else:
         # cluster each partition separately
         clusters = list()
         sums = list()
         for p in range(np.max(partitions) + 1):
             x_i = np.logical_and(x_in, partitions == p)
-            out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power)
+            out = _find_clusters_1dir(x, x_i, connectivity, max_step, t_power,
+                                      ndimage)
             clusters += out[0]
             sums.append(out[1])
         sums = np.concatenate(sums)
     return clusters, sums
 
 
-def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power):
+def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power, ndimage):
     """Actually call the clustering algorithm"""
     if connectivity is None:
         labels, n_labels = ndimage.label(x_in)
@@ -432,15 +436,15 @@ def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power):
             # slices
             clusters = ndimage.find_objects(labels, n_labels)
             if len(clusters) == 0:
-                sums = []
+                sums = list()
             else:
+                index = list(range(1, n_labels + 1))
                 if t_power == 1:
-                    sums = ndimage.measurements.sum(x, labels,
-                                                  index=list(range(1, n_labels + 1)))
+                    sums = ndimage.measurements.sum(x, labels, index=index)
                 else:
                     sums = ndimage.measurements.sum(np.sign(x) *
-                                                  np.abs(x) ** t_power, labels,
-                                                  index=list(range(1, n_labels + 1)))
+                                                    np.abs(x) ** t_power,
+                                                    labels, index=index)
         else:
             # boolean masks (raveled)
             clusters = list()
@@ -494,7 +498,7 @@ def _pval_from_histogram(T, H0, tail):
     For each stat compute a p-value as percentile of its statistics
     within all statistics in surrogate data
     """
-    if not tail in [-1, 0, 1]:
+    if tail not in [-1, 0, 1]:
         raise ValueError('invalid tail parameter')
 
     # from pct to fraction
@@ -526,7 +530,7 @@ def _setup_connectivity(connectivity, n_vertices, n_times):
 
 def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
                      max_step, include, partitions, t_power, seeds,
-                     sample_shape, buffer_size):
+                     sample_shape, buffer_size, progress_bar):
 
     n_samp, n_vars = X_full.shape
 
@@ -542,6 +546,10 @@ def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
                     for s in slices]
 
     for seed_idx, seed in enumerate(seeds):
+        if progress_bar is not None:
+            if (not (seed_idx + 1) % 32) or (seed_idx == 0):
+                progress_bar.update(seed_idx + 1)
+
         # shuffle sample indices
         rng = np.random.RandomState(seed)
         idx_shuffled = np.arange(n_samp)
@@ -590,7 +598,7 @@ def _do_permutations(X_full, slices, threshold, tail, connectivity, stat_fun,
 
 def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
                            max_step, include, partitions, t_power, seeds,
-                           sample_shape, buffer_size):
+                           sample_shape, buffer_size, progress_bar):
     n_samp, n_vars = X.shape
     assert slices is None  # should be None for the 1 sample case
 
@@ -605,6 +613,10 @@ def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
         X_flip_buffer = np.empty((n_samp, buffer_size), dtype=X.dtype)
 
     for seed_idx, seed in enumerate(seeds):
+        if progress_bar is not None:
+            if not (seed_idx + 1) % 32 or seed_idx == 0:
+                progress_bar.update(seed_idx + 1)
+
         if isinstance(seed, np.ndarray):
             # new surrogate data with specified sign flip
             if not seed.size == n_samp:
@@ -619,11 +631,15 @@ def _do_1samp_permutations(X, slices, threshold, tail, connectivity, stat_fun,
             signs = signs[:, np.newaxis]
 
         if buffer_size is None:
-            X *= signs
-            # Recompute statistic on randomized data
-            T_obs_surr = stat_fun(X)
-            # Set X back to previous state (trade memory eff. for CPU use)
-            X *= signs
+            # be careful about non-writable memmap (GH#1507)
+            if X.flags.writeable:
+                X *= signs
+                # Recompute statistic on randomized data
+                T_obs_surr = stat_fun(X)
+                # Set X back to previous state (trade memory eff. for CPU use)
+                X *= signs
+            else:
+                T_obs_surr = stat_fun(X * signs)
         else:
             # only sign-flip a small data buffer, so we need less memory
             T_obs_surr = np.empty(n_vars, dtype=X.dtype)
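
The sign-flip scheme above (including the new memmap-safe branch) relies on
the symmetry of the null distribution in a one-sample test; a toy surrogate
(illustrative sketch):

    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.randn(10, 4) + 0.3  # 10 observations, 4 variables
    signs = np.where(rng.rand(10) < 0.5, -1.0, 1.0)[:, np.newaxis]
    X_surr = X * signs  # one surrogate dataset under the null
    t_surr = X_surr.mean(0) / (X_surr.std(0, ddof=1) / np.sqrt(len(X_surr)))
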
@@ -671,8 +687,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
     either a 1 sample t-test or an f-test / more sample permutation scheme
     is elicited.
     """
-
-    if not out_type in ['mask', 'indices']:
+    if out_type not in ['mask', 'indices']:
         raise ValueError('out_type must be either \'mask\' or \'indices\'')
 
     # check dimensions for each group in X (a list at this stage).
@@ -726,7 +741,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
         partitions = _get_partitions_from_connectivity(connectivity, n_times)
     else:
         partitions = None
-
+    logger.info('Running initial clustering')
     out = _find_clusters(T_obs, threshold, tail, connectivity,
                          max_step=max_step, include=include,
                          partitions=partitions, t_power=t_power,
@@ -761,12 +776,17 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
         n_samples_per_condition = [x.shape[0] for x in X]
         splits_idx = np.append([0], np.cumsum(n_samples_per_condition))
         slices = [slice(splits_idx[k], splits_idx[k + 1])
-                                                    for k in range(len(X))]
-
+                  for k in range(len(X))]
     parallel, my_do_perm_func, _ = parallel_func(do_perm_func, n_jobs)
 
     # Step 2: If we have some clusters, repeat process on permuted data
     # -------------------------------------------------------------------
+
+    def get_progress_bar(seeds):
+        # make sure the progress bar adds up to 100% across n jobs
+        return (ProgressBar(len(seeds), spinner=True) if
+                logger.level <= logging.INFO else None)
+
     if len(clusters) > 0:
         # check to see if we can do an exact test
         # note for a two-tailed test, we can exploit symmetry to just do half
@@ -790,6 +810,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
         total_removed = 0
         step_down_include = None  # start out including all points
         n_step_downs = 0
+
         while n_removed > 0:
             # actually do the clustering for each partition
             if include is not None:
@@ -799,11 +820,14 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
                     this_include = include
             else:
                 this_include = step_down_include
+            logger.info('Permuting ...')
             H0 = parallel(my_do_perm_func(X_full, slices, threshold, tail,
                           connectivity, stat_fun, max_step, this_include,
-                          partitions, t_power, s, sample_shape, buffer_size)
+                          partitions, t_power, s, sample_shape, buffer_size,
+                          get_progress_bar(s))
                           for s in split_list(seeds, n_jobs))
             H0 = np.concatenate(H0)
+            logger.info('Computing cluster p-values')
             cluster_pv = _pval_from_histogram(cluster_stats, H0, tail)
 
             # figure out how many new ones will be removed for step-down
@@ -822,7 +846,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun,
                 logger.info('Step-down-in-jumps iteration #%i found %i %s'
                             'cluster%s to exclude from subsequent iterations'
                             % (n_step_downs, n_removed, a_text, pl))
-
+        logger.info('Done.')
         # The clusters should have the same shape as the samples
         clusters = _reshape_clusters(clusters, sample_shape)
         return T_obs, clusters, cluster_pv, H0
@@ -864,7 +888,7 @@ def ttest_1samp_no_p(X, sigma=0, method='relative'):
     voxels in statistical parametric mapping; a new hat avoids a 'haircut'",
     NeuroImage. 2012 Feb 1;59(3):2131-41.
     """
-    if not method in ['absolute', 'relative']:
+    if method not in ['absolute', 'relative']:
         raise ValueError('method must be "absolute" or "relative", not %s'
                          % method)
     var = np.var(X, axis=0, ddof=1)
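
The sigma/method options implement the variance "hat" adjustment from
the reference cited above; roughly, as a sketch (not the verbatim
upstream code)::

    import numpy as np

    def t_stat_hat(X, sigma=1e-3, method='relative'):
        # inflate small variances so isolated noisy points cannot
        # produce spuriously large t-values (the "hat" adjustment)
        var = np.var(X, axis=0, ddof=1)
        if sigma > 0:
            var += sigma * np.max(var) if method == 'relative' else sigma
        return np.mean(X, axis=0) / np.sqrt(var / len(X))
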
@@ -883,16 +907,23 @@ def permutation_cluster_test(X, threshold=None, n_permutations=1024,
                              check_disjoint=False, buffer_size=1000):
     """Cluster-level statistical permutation test
 
-    For a list of 2d-arrays of data, e.g. power values, calculate some
-    statistics for each timepoint (dim 1) over groups.  Do a cluster
-    analysis with permutation test for calculating corrected p-values.
-    Randomized data are generated with random partitions of the data.
+    For a list of nd-arrays of data, e.g. 2d for time series or 3d for
+    time-frequency power values, calculate statistics corrected for
+    multiple comparisons using permutations and cluster-level correction.
+    Each element of the list X contains the data for one group of
+    observations. Randomized data are generated with random partitions
+    of the data.
 
     Parameters
     ----------
     X : list
-        List of 2d-arrays containing the data, dim 1: timepoints, dim 2:
-        elements of groups.
+        List of nd-arrays containing the data. Each element of X contains
+        the samples for one group. First dimension of each element is the
+        number of samples/observations in this group. The other dimensions
+        are for the size of the observations. For example, if X = [X1, X2]
+        with X1.shape = (20, 50, 4) and X2.shape = (17, 50, 4), there are
+        2 groups with 20 and 17 observations, respectively, where each
+        observation is of shape (50, 4).
     threshold : float | dict | None
         If threshold is None, it will choose a t-threshold equivalent to
         p < 0.05 for the given number of (within-subject) observations.
@@ -977,22 +1008,26 @@ def permutation_cluster_test(X, threshold=None, n_permutations=1024,
     Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
     doi:10.1016/j.jneumeth.2007.03.024
     """
+    from scipy import stats
+    ppf = stats.f.ppf
     if threshold is None:
         p_thresh = 0.05 / (1 + (tail == 0))
         n_samples_per_group = [len(x) for x in X]
-        threshold = stats.distributions.f.ppf(1. - p_thresh,
-                                              *n_samples_per_group)
+        threshold = ppf(1. - p_thresh, *n_samples_per_group)
         if np.sign(tail) < 0:
             threshold = -threshold
 
     return _permutation_cluster_test(X=X, threshold=threshold,
-                        n_permutations=n_permutations,
-                        tail=tail, stat_fun=stat_fun,
-                        connectivity=connectivity, verbose=verbose,
-                        n_jobs=n_jobs, seed=seed, max_step=max_step,
-                        exclude=exclude, step_down_p=step_down_p,
-                        t_power=t_power, out_type=out_type,
-                        check_disjoint=check_disjoint, buffer_size=buffer_size)
+                                     n_permutations=n_permutations,
+                                     tail=tail, stat_fun=stat_fun,
+                                     connectivity=connectivity,
+                                     verbose=verbose,
+                                     n_jobs=n_jobs, seed=seed,
+                                     max_step=max_step,
+                                     exclude=exclude, step_down_p=step_down_p,
+                                     t_power=t_power, out_type=out_type,
+                                     check_disjoint=check_disjoint,
+                                     buffer_size=buffer_size)
 
 
 permutation_cluster_test.__test__ = False
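
The default-threshold logic is easy to reproduce by hand. For the
two group sizes used in the docstring example (20 and 17 observations)
and a two-tailed test::

    from scipy import stats

    tail = 0
    p_thresh = 0.05 / (1 + (tail == 0))  # 0.025 for a two-tailed test
    f_thresh = stats.f.ppf(1. - p_thresh, 20, 17)
    # the one-sample variant below does the same with the t distribution
    t_thresh = -stats.t.ppf(p_thresh, 20 - 1)
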
@@ -1108,22 +1143,28 @@ def permutation_cluster_1samp_test(X, threshold=None, n_permutations=1024,
     Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
     doi:10.1016/j.jneumeth.2007.03.024
     """
+    from scipy import stats
+    ppf = stats.t.ppf
     if threshold is None:
         p_thresh = 0.05 / (1 + (tail == 0))
         n_samples = len(X)
-        threshold = -stats.distributions.t.ppf(p_thresh, n_samples - 1)
+        threshold = -ppf(p_thresh, n_samples - 1)
         if np.sign(tail) < 0:
             threshold = -threshold
 
     X = [X]  # for one sample only one data array
-    return _permutation_cluster_test(X=X, threshold=threshold,
-                        n_permutations=n_permutations,
-                        tail=tail, stat_fun=stat_fun,
-                        connectivity=connectivity, verbose=verbose,
-                        n_jobs=n_jobs, seed=seed, max_step=max_step,
-                        exclude=exclude, step_down_p=step_down_p,
-                        t_power=t_power, out_type=out_type,
-                        check_disjoint=check_disjoint, buffer_size=buffer_size)
+    return _permutation_cluster_test(X=X,
+                                     threshold=threshold,
+                                     n_permutations=n_permutations,
+                                     tail=tail, stat_fun=stat_fun,
+                                     connectivity=connectivity,
+                                     verbose=verbose,
+                                     n_jobs=n_jobs, seed=seed,
+                                     max_step=max_step,
+                                     exclude=exclude, step_down_p=step_down_p,
+                                     t_power=t_power, out_type=out_type,
+                                     check_disjoint=check_disjoint,
+                                     buffer_size=buffer_size)
 
 
 permutation_cluster_1samp_test.__test__ = False
@@ -1131,10 +1172,13 @@ permutation_cluster_1samp_test.__test__ = False
 
 @verbose
 def spatio_temporal_cluster_1samp_test(X, threshold=None,
-        n_permutations=1024, tail=0, stat_fun=ttest_1samp_no_p,
-        connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
-        spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
-        check_disjoint=False, buffer_size=1000):
+                                       n_permutations=1024, tail=0,
+                                       stat_fun=ttest_1samp_no_p,
+                                       connectivity=None, verbose=None,
+                                       n_jobs=1, seed=None, max_step=1,
+                                       spatial_exclude=None, step_down_p=0,
+                                       t_power=1, out_type='indices',
+                                       check_disjoint=False, buffer_size=1000):
     """Non-parametric cluster-level 1 sample T-test for spatio-temporal data
 
     This function provides a convenient wrapper for data organized in the form
@@ -1247,11 +1291,15 @@ def spatio_temporal_cluster_1samp_test(X, threshold=None,
 
     # do the heavy lifting
     out = permutation_cluster_1samp_test(X, threshold=threshold,
-              stat_fun=stat_fun, tail=tail, n_permutations=n_permutations,
-              connectivity=connectivity, n_jobs=n_jobs, seed=seed,
-              max_step=max_step, exclude=exclude, step_down_p=step_down_p,
-              t_power=t_power, out_type=out_type,
-              check_disjoint=check_disjoint, buffer_size=buffer_size)
+                                         stat_fun=stat_fun, tail=tail,
+                                         n_permutations=n_permutations,
+                                         connectivity=connectivity,
+                                         n_jobs=n_jobs, seed=seed,
+                                         max_step=max_step, exclude=exclude,
+                                         step_down_p=step_down_p,
+                                         t_power=t_power, out_type=out_type,
+                                         check_disjoint=check_disjoint,
+                                         buffer_size=buffer_size)
     return out
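
A usage sketch with hypothetical shapes (the adjacency helper requires
scikit-learn)::

    import numpy as np
    from sklearn.feature_extraction.image import grid_to_graph
    from mne.stats.cluster_level import spatio_temporal_cluster_1samp_test

    rng = np.random.RandomState(0)
    X = rng.randn(12, 30, 100)            # subjects x times x vertices
    X[:, 10:20, :40] += 1.                # inject an effect
    connectivity = grid_to_graph(1, 100)  # spatial adjacency only
    T_obs, clusters, p_vals, H0 = spatio_temporal_cluster_1samp_test(
        X, connectivity=connectivity, n_permutations=256)
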
 
 
@@ -1259,11 +1307,12 @@ spatio_temporal_cluster_1samp_test.__test__ = False
 
 
 @verbose
-def spatio_temporal_cluster_test(X, threshold=1.67,
-        n_permutations=1024, tail=0, stat_fun=f_oneway,
-        connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
-        spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
-        check_disjoint=False, buffer_size=1000):
+def spatio_temporal_cluster_test(X, threshold=1.67, n_permutations=1024,
+                                 tail=0, stat_fun=f_oneway,
+                                 connectivity=None, verbose=None, n_jobs=1,
+                                 seed=None, max_step=1, spatial_exclude=None,
+                                 step_down_p=0, t_power=1, out_type='indices',
+                                 check_disjoint=False, buffer_size=1000):
     """Non-parametric cluster-level test for spatio-temporal data
 
     This function provides a convenient wrapper for data organized in the form
@@ -1360,11 +1409,14 @@ def spatio_temporal_cluster_test(X, threshold=1.67,
 
     # do the heavy lifting
     out = permutation_cluster_test(X, threshold=threshold,
-              stat_fun=stat_fun, tail=tail, n_permutations=n_permutations,
-              connectivity=connectivity, n_jobs=n_jobs, seed=seed,
-              max_step=max_step, exclude=exclude, step_down_p=step_down_p,
-              t_power=t_power, out_type=out_type,
-              check_disjoint=check_disjoint, buffer_size=buffer_size)
+                                   stat_fun=stat_fun, tail=tail,
+                                   n_permutations=n_permutations,
+                                   connectivity=connectivity, n_jobs=n_jobs,
+                                   seed=seed, max_step=max_step,
+                                   exclude=exclude, step_down_p=step_down_p,
+                                   t_power=t_power, out_type=out_type,
+                                   check_disjoint=check_disjoint,
+                                   buffer_size=buffer_size)
     return out
 
 
@@ -1445,7 +1497,7 @@ def _reshape_clusters(clusters, sample_shape):
 
 
 def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
-    subject='fsaverage', vertno=[np.arange(10242), np.arange(10242)]):
+                           subject='fsaverage', vertices=None):
     """ Assemble summary SourceEstimate from spatiotemporal cluster results
 
     This helps visualizing results from spatio-temporal-clustering
@@ -1463,16 +1515,21 @@ def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
         The time of the first sample.
     subject : str
         The name of the subject.
-    vertno : list of arrays
-        The vertex numbers associated with the source space locations.
+    vertices : list of arrays | None
+        The vertex numbers associated with the source space locations. Defaults
+        to None. If None, equals ``[np.arange(10242), np.arange(10242)]``.
 
     Returns
     -------
     out : instance of SourceEstimate
     """
+    if vertices is None:
+        vertices = [np.arange(10242), np.arange(10242)]
+
     T_obs, clusters, clu_pvals, _ = clu
     n_times, n_vertices = T_obs.shape
     good_cluster_inds = np.where(clu_pvals < p_thresh)[0]
+
     #  Build a convenient representation of each cluster, where each
     #  cluster becomes a "time point" in the SourceEstimate
     if len(good_cluster_inds) > 0:
@@ -1483,15 +1540,16 @@ def summarize_clusters_stc(clu, p_thresh=0.05, tstep=1e-3, tmin=0,
             v_inds = clusters[cluster_ind][1]
             t_inds = clusters[cluster_ind][0]
             data[v_inds, t_inds] = T_obs[t_inds, v_inds]
-            # Store a nice visualization of the cluster by summing across time (in ms)
+            # Store a nice visualization of the cluster by summing across time
             data = np.sign(data) * np.logical_not(data == 0) * tstep
             data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)
             # Make the first "time point" a sum across all clusters for easy
             # visualization
         data_summary[:, 0] = np.sum(data_summary, axis=1)
 
-        return SourceEstimate(data_summary, vertno, tmin=tmin, tstep=tstep,
+        return SourceEstimate(data_summary, vertices, tmin=tmin, tstep=tstep,
                               subject=subject)
     else:
         raise RuntimeError('No significant clusters available. Please adjust '
-                           'your threshold or check your statistical analysis.')
+                           'your threshold or check your statistical '
+                           'analysis.')
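
With the new signature a typical call looks as follows; ``clu`` is
assumed to be the tuple returned by a previous run of
spatio_temporal_cluster_1samp_test on fsaverage ico-5 source estimates,
which is what the vertices=None default corresponds to::

    from mne.stats.cluster_level import summarize_clusters_stc

    stc_all_clusters = summarize_clusters_stc(clu, p_thresh=0.05,
                                              tstep=1e-3,
                                              subject='fsaverage')
    # "time point" 0 sums all significant clusters; each later time
    # point shows one cluster
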
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
index 37e4eee..a26b4a7 100644
--- a/mne/stats/multi_comp.py
+++ b/mne/stats/multi_comp.py
@@ -83,7 +83,7 @@ def bonferroni_correction(pval, alpha=0.05):
 
     Parameters
     ----------
-    pvals : array_like
+    pval : array_like
         set of p-values of the individual tests.
     alpha : float
         error rate
@@ -98,5 +98,5 @@ def bonferroni_correction(pval, alpha=0.05):
     """
     pval = np.asarray(pval)
     pval_corrected = pval * float(pval.size)
-    reject = pval < alpha
+    reject = pval_corrected < alpha
     return reject, pval_corrected
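
The fix matters whenever an uncorrected p-value is below alpha but its
corrected value is not; a small check::

    import numpy as np
    from mne.stats.multi_comp import bonferroni_correction

    pval = np.array([0.01, 0.02, 0.30])
    reject, pval_corrected = bonferroni_correction(pval, alpha=0.05)
    # corrected p-values are [0.03, 0.06, 0.90]: only the first test
    # survives, whereas the old code would also have rejected the second
    print(reject)  # [ True False False]
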
diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py
index 5987370..ed7fbe3 100644
--- a/mne/stats/parametric.py
+++ b/mne/stats/parametric.py
@@ -1,33 +1,22 @@
-import numpy as np
-from scipy import stats
-from scipy.stats import f
-fprob = f.sf  # stats.fprob is deprecated
-from scipy.signal import detrend
-from ..fixes import matrix_rank
-from functools import reduce
-from ..externals.six.moves import map  # analysis:ignore
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Denis Engemann <denis.engemann at gmail.com>
 #          Eric Larson <larson.eric.d at gmail.com>
 #
 # License: Simplified BSD
 
-defaults_twoway_rm = {
-    'parse': {
-        'A': [0],
-        'B': [1],
-        'A+B': [0, 1],
-        'A:B': [2],
-        'A*B': [0, 1, 2]
-        },
-    'iter_contrasts': np.array([(1, 0, 1), (0, 1, 1), (1, 1, 1)])
-    }
+import numpy as np
+from functools import reduce
+from string import ascii_uppercase
 
+from ..externals.six import string_types
+from ..utils import deprecated
+from ..fixes import matrix_rank
 
 # The following function is a rewriting of scipy.stats.f_oneway
 # Contrary to the scipy.stats.f_oneway implementation it does not
 # copy the data while keeping the inputs unchanged.
+
+
 def _f_oneway(*args):
     """
     Performs a 1-way ANOVA.
@@ -76,6 +65,8 @@ def _f_oneway(*args):
     .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
 
     """
+    from scipy import stats
+    sf = stats.f.sf
     n_classes = len(args)
     n_samples_per_class = np.array([len(a) for a in args])
     n_samples = np.sum(n_samples_per_class)
@@ -95,7 +86,7 @@ def _f_oneway(*args):
     msb = ssbn / float(dfbn)
     msw = sswn / float(dfwn)
     f = msb / msw
-    prob = fprob(dfbn, dfwn, f)
+    prob = sf(dfbn, dfwn, f)
     return f, prob
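
The rewrite is meant to agree numerically with SciPy while avoiding the
copy; a quick illustrative check::

    import numpy as np
    from scipy import stats
    from mne.stats.parametric import _f_oneway

    rng = np.random.RandomState(0)
    g1, g2, g3 = rng.randn(20), rng.randn(20) + 0.5, rng.randn(20)
    np.testing.assert_allclose(_f_oneway(g1, g2, g3),
                               stats.f_oneway(g1, g2, g3))
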
 
 
@@ -104,19 +95,76 @@ def f_oneway(*args):
     return _f_oneway(*args)[0]
 
 
-def _check_effects(effects):
-    """ Aux Function """
-    if effects.upper() not in defaults_twoway_rm['parse']:
-        raise ValueError('The value passed for `effects` is not supported. '
-                         'Please consider the documentation.')
-
-    return defaults_twoway_rm['parse'][effects]
+def _map_effects(n_factors, effects):
+    """Map effects to indices"""
+    if n_factors > len(ascii_uppercase):
+        raise ValueError('Maximum number of factors supported is 26')
+
+    factor_names = list(ascii_uppercase[:n_factors])
+
+    if isinstance(effects, string_types):
+        if '*' in effects and ':' in effects:
+            raise ValueError('Not "*" and ":" permitted in effects')
+        elif '+' in effects and ':' in effects:
+            raise ValueError('Not "+" and ":" permitted in effects')
+        elif effects == 'all':
+            effects = None
+        elif len(effects) == 1 or ':' in effects:
+            effects = [effects]
+        elif '+' in effects:
+            # all main effects
+            effects = effects.split('+')
+        elif '*' in effects:
+            pass  # handle later
+        else:
+            raise ValueError('"{0}" is not a valid option for "effects"'
+                             .format(effects))
+    if isinstance(effects, list):
+        bad_names = [e for e in effects if e not in factor_names]
+        if len(bad_names) > 0:
+            raise ValueError('Effect names: {0} are not valid. They should '
+                             'be the first `n_factors` ({1}) characters from '
+                             'the alphabet'.format(bad_names, n_factors))
+
+    indices = list(np.arange(2 ** n_factors - 1))
+    names = list()
+    for this_effect in indices:
+        contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
+        this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
+        this_name = [factor_names[e] for e in this_code]
+        this_name.sort()
+        names.append(':'.join(this_name))
+
+    if effects is None or isinstance(effects, string_types):
+        effects_ = names
+    else:
+        effects_ = effects
+
+    selection = [names.index(sel) for sel in effects_]
+    names = [names[sel] for sel in selection]
+
+    if isinstance(effects, string_types):
+        if '*' in effects:
+            # hierarchical order of effects
+            # the * based effect can be used as stop index
+            sel_ind = names.index(effects.replace('*', ':')) + 1
+            names = names[:sel_ind]
+            selection = selection[:sel_ind]
+
+    return selection, names
+
+
+def _get_contrast_indices(effect_idx, n_factors):
+    """Henson's factor coding, see num2binvec"""
+    binrepr = np.binary_repr(effect_idx, n_factors)
+    return np.array([int(i) for i in binrepr], dtype=int)
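
Traced through the implementation above, the parser yields for
example::

    from mne.stats.parametric import _map_effects

    _map_effects(n_factors=2, effects='A*B')
    # -> ([0, 1, 2], ['A', 'B', 'A:B'])
    _map_effects(n_factors=2, effects=['A', 'A:B'])
    # -> ([0, 2], ['A', 'A:B'])
    _map_effects(n_factors=3, effects='A+B')
    # -> ([0, 1], ['A', 'B'])
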
 
 
 def _iter_contrasts(n_subjects, factor_levels, effect_picks):
     """ Aux Function: Setup contrasts """
-    sc, sy, = [], []
-
+    from scipy.signal import detrend
+    sc = []
+    n_factors = len(factor_levels)
     # prepare computation of Kronecker products
     for n_levels in factor_levels:
         # for each factor append
@@ -126,23 +174,29 @@ def _iter_contrasts(n_subjects, factor_levels, effect_picks):
         # main + interaction effects for contrasts
         sc.append([np.ones([n_levels, 1]),
                    detrend(np.eye(n_levels), type='constant')])
-        # main + interaction effects for component means
-        sy.append([np.ones([n_levels, 1]) / n_levels, np.eye(n_levels)])
-        # XXX component means not returned at the moment
-
-    for (c1, c2, c3) in defaults_twoway_rm['iter_contrasts'][effect_picks]:
-        # c1 selects the first factors' level in the column vector
-        # c3 selects the actual factor
-        # c2 selects either its column vector or diag matrix
-        c_ = np.kron(sc[0][c1], sc[c3][c2])
-        # for 3 way anova accumulation of c_ across factors required
+
+    for this_effect in effect_picks:
+        contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
+        c_ = sc[0][contrast_idx[n_factors - 1]]
+        for i_contrast in range(1, n_factors):
+            this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
+            c_ = np.kron(c_, sc[i_contrast][this_contrast])
         df1 = matrix_rank(c_)
         df2 = df1 * (n_subjects - 1)
         yield c_, df1, df2
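
For a 2 x 3 design the pieces combined by this loop look as follows (an
illustrative reconstruction)::

    import numpy as np
    from numpy.linalg import matrix_rank
    from scipy.signal import detrend

    # per factor: a column of ones (collapse) and a demeaned identity
    # (differentiate), exactly as appended to `sc` above
    sc = [[np.ones((n, 1)), detrend(np.eye(n), type='constant')]
          for n in (2, 3)]
    c_A = np.kron(sc[0][1], sc[1][0])   # main effect of A, shape (6, 2)
    c_B = np.kron(sc[0][0], sc[1][1])   # main effect of B, shape (6, 3)
    c_AB = np.kron(sc[0][1], sc[1][1])  # interaction A:B, shape (6, 6)
    # numerator dfs match classical ANOVA: (2-1), (3-1), (2-1)*(3-1)
    print(matrix_rank(c_A), matrix_rank(c_B), matrix_rank(c_AB))  # 1 2 2
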
 
 
+ at deprecated('"f_threshold_twoway_rm" is deprecated and will be removed in'
+            'MNE-0.11. Please use f_threshold_mway_rm instead')
 def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
                           pvalue=0.05):
+    return f_threshold_mway_rm(
+        n_subjects=n_subjects, factor_levels=factor_levels,
+        effects=effects, pvalue=pvalue)
+
+
+def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
+                        pvalue=0.05):
     """ Compute f-value thesholds for a two-way ANOVA
 
     Parameters
@@ -167,46 +221,71 @@ def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
     f_threshold : list | float
         list of f-values for each effect if the number of effects
         requested > 2, else float.
+
+    See Also
+    --------
+    f_oneway
+    f_mway_rm
+
+    Notes
+    -----
+    .. versionadded:: 0.10
     """
-    effect_picks = _check_effects(effects)
+    from scipy.stats import f
+    effect_picks, _ = _map_effects(len(factor_levels), effects)
 
     f_threshold = []
     for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
                                        effect_picks):
-        f_threshold.append(stats.f(df1, df2).isf(pvalue))
+        f_threshold.append(f(df1, df2).isf(pvalue))
 
     return f_threshold if len(f_threshold) > 1 else f_threshold[0]
 
 
 # The following functions based on MATLAB code by Rik Henson
 # and Python code from the pvttble toolbox by Roger Lew.
+ at deprecated('"f_twoway_rm" is deprecated and will be removed in MNE 0.11."'
+            " Please use f_mway_rm instead")
 def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
                 correction=False, return_pvals=True):
-    """ 2 way repeated measures ANOVA for fully balanced designs
+    """This function is deprecated, use `f_mway_rm` instead"""
+    return f_mway_rm(data=data, factor_levels=factor_levels, effects=effects,
+                     alpha=alpha, correction=correction,
+                     return_pvals=return_pvals)
 
+
+def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
+              correction=False, return_pvals=True):
+    """M-way repeated measures ANOVA for fully balanced designs
+
+    Parameters
+    ----------
     data : ndarray
         3D array where the first two dimensions are compliant
-        with a subjects X conditions scheme:
-
-        first factor repeats slowest:
+        with a subjects X conditions scheme where the first
+        factor repeats slowest::
 
-                    A1B1 A1B2 A2B1 B2B2
-        subject 1   1.34 2.53 0.97 1.74
-        subject ... .... .... .... ....
-        subject k   2.45 7.90 3.09 4.76
+                        A1B1 A1B2 A2B1 A2B2
+            subject 1   1.34 2.53 0.97 1.74
+            subject ... .... .... .... ....
+            subject k   2.45 7.90 3.09 4.76
 
         The last dimension is thought to carry the observations
         for mass univariate analysis.
     factor_levels : list-like
         The number of levels per factor.
-    effects : str
+    effects : str | list
         A string denoting the effect to be returned. The following
-        mapping is currently supported:
-            'A': main effect of A
-            'B': main effect of B
-            'A:B': interaction effect
-            'A+B': both main effects
-            'A*B': all three effects
+        mapping is currently supported (example with 2 factors):
+
+            * ``'A'``: main effect of A
+            * ``'B'``: main effect of B
+            * ``'A:B'``: interaction effect
+            * ``'A+B'``: both main effects
+            * ``'A*B'``: all three effects
+            * ``'all'``: all effects (equals 'A*B' in a 2 way design)
+
+        If list, effect names are used: ``['A', 'B', 'A:B']``.
     alpha : float
         The significance threshold.
     correction : bool
@@ -224,14 +303,24 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
         estimated.
     p_vals : ndarray
         If not requested via return_pvals, defaults to an empty array.
+
+    See Also
+    --------
+    f_oneway
+    f_threshold_mway_rm
+
+    Notes
+    -----
+    .. versionadded:: 0.10
     """
+    from scipy.stats import f
     if data.ndim == 2:  # general purpose support, e.g. behavioural data
         data = data[:, :, np.newaxis]
     elif data.ndim > 3:  # let's allow for some magic here.
-        data = data.reshape(data.shape[0], data.shape[1],
-                            np.prod(data.shape[2:]))
+        data = data.reshape(
+            data.shape[0], data.shape[1], np.prod(data.shape[2:]))
 
-    effect_picks = _check_effects(effects)
+    effect_picks, _ = _map_effects(len(factor_levels), effects)
     n_obs = data.shape[2]
     n_replications = data.shape[0]
 
@@ -259,10 +348,10 @@ def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
             df1, df2 = [d[None, :] * eps for d in (df1, df2)]
 
         if return_pvals:
-            pvals = stats.f(df1, df2).sf(fvals)
+            pvals = f(df1, df2).sf(fvals)
         else:
             pvals = np.empty(0)
         pvalues.append(pvals)
 
     # handle single effect returns
-    return [np.squeeze(np.asarray(v)) for v in (fvalues, pvalues)]
+    return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
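
A usage sketch for the new m-way API, with made-up data for a fully
balanced 2 x 3 design::

    import numpy as np
    from mne.stats.parametric import f_mway_rm, f_threshold_mway_rm

    rng = np.random.RandomState(42)
    data = rng.randn(10, 6, 50)  # 10 subjects, 2 x 3 conditions, 50 tests
    fvals, pvals = f_mway_rm(data, factor_levels=[2, 3], effects='A*B')
    # one row per effect, in the order ['A', 'B', 'A:B']
    f_thresh = f_threshold_mway_rm(10, [2, 3], effects='A*B', pvalue=0.05)
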
diff --git a/mne/stats/regression.py b/mne/stats/regression.py
index a668f95..b5fb7d7 100644
--- a/mne/stats/regression.py
+++ b/mne/stats/regression.py
@@ -1,21 +1,25 @@
 # Authors: Tal Linzen <linzen at nyu.edu>
-#          Teon Brooks <teon at nyu.edu>
+#          Teon Brooks <teon.brooks at gmail.com>
 #          Denis A. Engemann <denis.engemann at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
+#          Marijn van Vliet <w.m.vanvliet at gmail.com>
 #
 # License: BSD (3-clause)
 
 from collections import namedtuple
 from inspect import isgenerator
 import warnings
+from ..externals.six import string_types
 
 import numpy as np
-from scipy import linalg, stats
+from scipy import linalg, sparse
 
 from ..source_estimate import SourceEstimate
 from ..epochs import _BaseEpochs
 from ..evoked import Evoked, EvokedArray
-from ..utils import logger
-from ..io.pick import pick_types
+from ..utils import logger, _reject_data_segments, _get_fast_dot
+from ..io.pick import pick_types, pick_info
+from ..fixes import in1d
 
 
 def linear_regression(inst, design_matrix, names=None):
@@ -102,6 +106,7 @@ def linear_regression(inst, design_matrix, names=None):
 
 def _fit_lm(data, design_matrix, names):
     """Aux function"""
+    from scipy import stats
     n_samples = len(data)
     n_features = np.product(data.shape[1:])
     if design_matrix.ndim != 2:
@@ -133,3 +138,200 @@ def _fit_lm(data, design_matrix, names):
         mlog10_p_val[predictor] = -np.log10(p_val[predictor])
 
     return beta, stderr, t_val, p_val, mlog10_p_val
+
+
+def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
+                          covariates=None, reject=None, flat=None, tstep=1.,
+                          decim=1, picks=None, solver='pinv'):
+    """Estimate regression-based evoked potentials/fields by linear modelling
+
+    This models the full M/EEG time course, including correction for
+    overlapping potentials and allowing for continuous/scalar predictors.
+    Internally, this constructs a predictor matrix X of size
+    n_samples * (n_conds * window length), solving the linear system
+    ``Y = bX`` and returning ``b`` as evoked-like time series split by
+    condition. See [1]_.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        A raw object. Note: be very careful about data that is not
+        downsampled, as the resulting matrices can be enormous and easily
+        overload your computer. Typically, 100 Hz sampling rate is
+        appropriate - or using the decim keyword (see below).
+    events : ndarray of int, shape (n_events, 3)
+        An array where the first column corresponds to samples in raw
+        and the last to integer codes in event_id.
+    event_id : dict
+        As in Epochs; a dictionary where the values may be integers or
+        iterables of integers, corresponding to the 3rd column of
+        events, and the keys are condition names.
+    tmin : float | dict
+        If float, gives the lower limit (in seconds) for the time window for
+        which all event types' effects are estimated. If a dict, can be used to
+        specify time windows for specific event types: keys correspond to keys
+        in event_id and/or covariates; for missing values, the default (-.1) is
+        used.
+    tmax : float | dict
+        If float, gives the upper limit (in seconds) for the time window for
+        which all event types' effects are estimated. If a dict, can be used to
+        specify time windows for specific event types: keys correspond to keys
+        in event_id and/or covariates; for missing values, the default (1.) is
+        used.
+    covariates : dict-like | None
+        If dict-like (e.g., a pandas DataFrame), values have to be array-like
+        and of the same length as the columns in ``events``. Keys correspond
+        to additional event types/conditions to be estimated and are matched
+        with the time points given by the first column of ``events``. If
+        None, only binary events (from event_id) are used.
+    reject : None | dict
+        For cleaning raw data before the regression is performed: set up
+        rejection parameters based on peak-to-peak amplitude in continuously
+        selected subepochs. If None, no rejection is done.
+        If dict, keys are types ('grad' | 'mag' | 'eeg' | 'eog' | 'ecg')
+        and values are the maximal peak-to-peak values to select rejected
+        epochs, e.g.::
+
+            reject = dict(grad=4000e-12,  # T / m (gradiometers)
+                          mag=4e-11,  # T (magnetometers)
+                          eeg=40e-5,  # uV (EEG channels)
+                          eog=250e-5)  # uV (EOG channels)
+
+    flat : None | dict
+        For cleaning raw data before the regression is performed: set up
+        rejection parameters based on flatness of the signal. If None, no
+        rejection is done. If a dict, keys are ('grad' | 'mag' |
+        'eeg' | 'eog' | 'ecg') and values are minimal peak-to-peak values to
+        select rejected epochs.
+    tstep : float
+        Length of windows for peak-to-peak detection for raw data cleaning.
+    decim : int
+        Decimate by choosing only a subsample of data points. Highly
+        recommended for data recorded at high sampling frequencies, as
+        otherwise huge intermediate matrices have to be created and inverted.
+    picks : None | list
+        List of indices of channels to be included. If None, defaults to all
+        MEG and EEG channels.
+    solver : str | function
+        Either a function which takes as its inputs the sparse predictor
+        matrix X and the observation matrix Y, and returns the coefficient
+        matrix b; or a string (for now, only 'pinv'), in which case the
+        solver used is dot(scipy.linalg.pinv(dot(X.T, X)), dot(X.T, Y.T)).T.
+
+    Returns
+    -------
+    evokeds : dict
+        A dict where the keys correspond to conditions and the values are
+        Evoked objects with the ER[F/P]s. These can be used exactly like any
+        other Evoked object, including e.g. plotting or statistics.
+
+    References
+    ----------
+    .. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
+           waveforms: II. Non-linear effects, overlap correction, and practical
+           considerations. Psychophysiology, 52(2), 169-189.
+    """
+
+    if isinstance(solver, string_types):
+        if solver == 'pinv':
+            fast_dot = _get_fast_dot()
+
+            # inv is slightly (~10%) faster, but pinv seemingly more stable
+            def solver(X, Y):
+                return fast_dot(linalg.pinv(X.T.dot(X).todense()),
+                                X.T.dot(Y.T)).T
+        else:
+            raise ValueError("No such solver: {0}".format(solver))
+
+    # prepare raw and events
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=True)
+    info = pick_info(raw.info, picks, copy=True)
+    decim = int(decim)
+    info["sfreq"] /= decim
+    data, times = raw[:]
+    data = data[picks, ::decim]
+    times = times[::decim]
+    events = events.copy()
+    events[:, 0] -= raw.first_samp
+    events[:, 0] //= decim
+
+    conds = list(event_id)
+    if covariates is not None:
+        conds += list(covariates)
+
+    # time windows (per event type) are converted to sample points from times
+    if isinstance(tmin, (float, int)):
+        tmin_s = dict((cond, int(tmin * info["sfreq"])) for cond in conds)
+    else:
+        tmin_s = dict((cond, int(tmin.get(cond, -.1) * info["sfreq"]))
+                      for cond in conds)
+    if isinstance(tmax, (float, int)):
+        tmax_s = dict(
+            (cond, int((tmax * info["sfreq"]) + 1.)) for cond in conds)
+    else:
+        tmax_s = dict((cond, int((tmax.get(cond, 1.) * info["sfreq"]) + 1))
+                      for cond in conds)
+
+    # Construct predictor matrix
+    # We do this by creating one array per event type, shape (lags, samples)
+    # (where lags depends on tmin/tmax and can be different for different
+    # event types). Columns correspond to predictors, predictors correspond to
+    # time lags. Thus, each array is mostly sparse, with one diagonal of 1s
+    # per event (for binary predictors).
+
+    cond_length = dict()
+    xs = []
+    for cond in conds:
+        tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
+        n_lags = int(tmax_ - tmin_)  # width of matrix
+        if cond in event_id:  # for binary predictors
+            ids = ([event_id[cond]]
+                   if isinstance(event_id[cond], int)
+                   else event_id[cond])
+            onsets = -(events[in1d(events[:, 2], ids), 0] + tmin_)
+            values = np.ones((len(onsets), n_lags))
+
+        else:  # for predictors from covariates, e.g. continuous ones
+            covs = covariates[cond]
+            if len(covs) != len(events):
+                error = ("Condition {0} from ```covariates``` is "
+                         "not the same length as ```events```").format(cond)
+                raise ValueError(error)
+            onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
+            v = np.asarray(covs)[np.nonzero(covs)].astype(float)
+            values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]
+
+        cond_length[cond] = len(onsets)
+        xs.append(sparse.dia_matrix((values, onsets),
+                                    shape=(data.shape[1], n_lags)))
+
+    X = sparse.hstack(xs)
+
+    # find only those positions where at least one predictor isn't 0
+    has_val = np.unique(X.nonzero()[0])
+
+    # additionally, reject positions based on extreme steps in the data
+    if reject is not None:
+        _, inds = _reject_data_segments(data, reject, flat, decim=None,
+                                        info=info, tstep=tstep)
+        for t0, t1 in inds:
+            has_val = np.setdiff1d(has_val, range(t0, t1))
+
+    # solve linear system
+    X, data = X.tocsr()[has_val], data[:, has_val]
+    coefs = solver(X, data)
+
+    # construct Evoked objects to be returned from output
+    evokeds = dict()
+    cum = 0
+    for cond in conds:
+        tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
+        evokeds[cond] = EvokedArray(coefs[:, cum:cum + tmax_ - tmin_],
+                                    info=info, comment=cond,
+                                    tmin=tmin_ / float(info["sfreq"]),
+                                    nave=cond_length[cond],
+                                    kind='mean')  # note that nave and kind are
+        cum += tmax_ - tmin_                      # technically not correct
+
+    return evokeds
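
The predictor matrix is the heart of the overlap correction. A toy
version of the ``dia_matrix`` construction used above, with made-up
event samples and tmin = 0::

    import numpy as np
    from scipy import sparse

    n_samples, n_lags = 10, 3   # 3-sample estimation window
    onsets = -np.array([2, 6])  # events at samples 2 and 6
    values = np.ones((len(onsets), n_lags))
    X = sparse.dia_matrix((values, onsets), shape=(n_samples, n_lags))
    print(X.toarray())
    # each event contributes a diagonal of ones, so temporally
    # overlapping responses can be separated by solving Y = bX
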
diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py
index d8067a0..3f00cc9 100644
--- a/mne/stats/tests/test_cluster_level.py
+++ b/mne/stats/tests/test_cluster_level.py
@@ -1,3 +1,5 @@
+import os
+import os.path as op
 import numpy as np
 from numpy.testing import (assert_equal, assert_array_equal,
                            assert_array_almost_equal)
@@ -11,32 +13,73 @@ from mne.stats.cluster_level import (permutation_cluster_test,
                                      spatio_temporal_cluster_test,
                                      spatio_temporal_cluster_1samp_test,
                                      ttest_1samp_no_p, summarize_clusters_stc)
+from mne.utils import run_tests_if_main, slow_test, _TempDir, set_log_file
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
+n_space = 50
+
+
 def _get_conditions():
     noise_level = 20
-
+    n_time_1 = 20
+    n_time_2 = 13
     normfactor = np.hanning(20).sum()
     rng = np.random.RandomState(42)
-    condition1_1d = rng.randn(40, 350) * noise_level
+    condition1_1d = rng.randn(n_time_1, n_space) * noise_level
     for c in condition1_1d:
         c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
 
-    condition2_1d = rng.randn(33, 350) * noise_level
+    condition2_1d = rng.randn(n_time_2, n_space) * noise_level
     for c in condition2_1d:
         c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
 
-    pseudoekp = 5 * np.hanning(150)[None, :]
-    condition1_1d[:, 100:250] += pseudoekp
-    condition2_1d[:, 100:250] -= pseudoekp
+    pseudoekp = 10 * np.hanning(25)[None, :]
+    condition1_1d[:, 25:] += pseudoekp
+    condition2_1d[:, 25:] -= pseudoekp
 
     condition1_2d = condition1_1d[:, :, np.newaxis]
     condition2_2d = condition2_1d[:, :, np.newaxis]
     return condition1_1d, condition2_1d, condition1_2d, condition2_2d
 
 
+def test_cache_dir():
+    """Test use of cache dir
+    """
+    tempdir = _TempDir()
+    orig_dir = os.getenv('MNE_CACHE_DIR', None)
+    orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
+    rng = np.random.RandomState(0)
+    X = rng.randn(9, 2, 10)
+    log_file = op.join(tempdir, 'log.txt')
+    try:
+        os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
+        os.environ['MNE_CACHE_DIR'] = tempdir
+        # Fix error for #1507: in-place when memmapping
+        permutation_cluster_1samp_test(
+            X, buffer_size=None, n_jobs=2, n_permutations=1,
+            seed=0, stat_fun=ttest_1samp_no_p, verbose=False)
+        # ensure that non-independence yields warning
+        stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
+        set_log_file(log_file)
+        permutation_cluster_1samp_test(
+            X, buffer_size=10, n_jobs=2, n_permutations=1,
+            seed=0, stat_fun=stat_fun, verbose=False)
+        with open(log_file, 'r') as fid:
+            assert_true('independently' in ''.join(fid.readlines()))
+    finally:
+        if orig_dir is not None:
+            os.environ['MNE_CACHE_DIR'] = orig_dir
+        else:
+            del os.environ['MNE_CACHE_DIR']
+        if orig_size is not None:
+            os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
+        else:
+            del os.environ['MNE_MEMMAP_MIN_SIZE']
+        set_log_file(None)
+
+
 def test_permutation_step_down_p():
     """Test cluster level permutations with step_down_p
     """
@@ -44,7 +87,7 @@ def test_permutation_step_down_p():
         try:
             from sklearn.feature_extraction.image import grid_to_graph
         except ImportError:
-            from scikits.learn.feature_extraction.image import grid_to_graph
+            from scikits.learn.feature_extraction.image import grid_to_graph  # noqa
     except ImportError:
         return
     rng = np.random.RandomState(0)
@@ -56,16 +99,16 @@ def test_permutation_step_down_p():
     thresh = 2
     # make sure it works when we use ALL points in step-down
     t, clusters, p, H0 = \
-            permutation_cluster_1samp_test(X, threshold=thresh,
-                                            step_down_p=1.0)
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=1.0)
     # make sure using step-down will actually yield improvements sometimes
     t, clusters, p_old, H0 = \
-            permutation_cluster_1samp_test(X, threshold=thresh,
-                                           step_down_p=0.0)
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=0.0)
     assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster
     t, clusters, p_new, H0 = \
-            permutation_cluster_1samp_test(X, threshold=thresh,
-                                           step_down_p=0.05)
+        permutation_cluster_1samp_test(X, threshold=thresh,
+                                       step_down_p=0.05)
     assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued
     assert_true(np.all(p_old >= p_new))
 
@@ -78,15 +121,13 @@ def test_cluster_permutation_test():
     for condition1, condition2 in zip((condition1_1d, condition1_2d),
                                       (condition2_1d, condition2_2d)):
         T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
-                                    [condition1, condition2],
-                                    n_permutations=100, tail=1, seed=1,
-                                    buffer_size=None)
+            [condition1, condition2], n_permutations=100, tail=1, seed=1,
+            buffer_size=None)
         assert_equal(np.sum(cluster_p_values < 0.05), 1)
 
         T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
-                                    [condition1, condition2],
-                                    n_permutations=100, tail=0, seed=1,
-                                    buffer_size=None)
+            [condition1, condition2], n_permutations=100, tail=0, seed=1,
+            buffer_size=None)
         assert_equal(np.sum(cluster_p_values < 0.05), 1)
 
         # test with 2 jobs and buffer_size enabled
@@ -98,6 +139,7 @@ def test_cluster_permutation_test():
         assert_array_equal(cluster_p_values, cluster_p_values_buff)
 
 
+@slow_test
 def test_cluster_permutation_t_test():
     """Test cluster level permutations T-test
     """
@@ -166,13 +208,13 @@ def test_cluster_permutation_with_connectivity():
 
     did_warn = False
     for X1d, X2d, func, spatio_temporal_func in \
-                [(condition1_1d, condition1_2d,
-                  permutation_cluster_1samp_test,
-                  spatio_temporal_cluster_1samp_test),
-                 ([condition1_1d, condition2_1d],
-                  [condition1_2d, condition2_2d],
-                  permutation_cluster_test,
-                  spatio_temporal_cluster_test)]:
+            [(condition1_1d, condition1_2d,
+              permutation_cluster_1samp_test,
+              spatio_temporal_cluster_1samp_test),
+             ([condition1_1d, condition2_1d],
+              [condition1_2d, condition2_2d],
+              permutation_cluster_test,
+              spatio_temporal_cluster_test)]:
         out = func(X1d, **args)
         connectivity = grid_to_graph(1, n_pts)
         out_connectivity = func(X1d, connectivity=connectivity, **args)
@@ -203,15 +245,15 @@ def test_cluster_permutation_with_connectivity():
 
         # Make sure that we got the old ones back
         data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
-        data_2 = set([np.sum(out_connectivity_2[0][a[:n_pts]]) for a in
+        data_2 = set([np.sum(out_connectivity_2[0][a]) for a in
                      out_connectivity_2[1][:]])
         assert_true(len(data_1.intersection(data_2)) == len(data_1))
 
         # now use the other algorithm
         if isinstance(X1d, list):
-            X1d_3 = [np.reshape(x, (-1, 2, 350)) for x in X1d_2]
+            X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
         else:
-            X1d_3 = np.reshape(X1d_2, (-1, 2, 350))
+            X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))
 
         out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                   connectivity=connectivity,
@@ -250,8 +292,8 @@ def test_cluster_permutation_with_connectivity():
 
         if not _force_serial:
             assert_raises(ValueError, spatio_temporal_func, X1d_3,
-                          n_permutations=1, connectivity=connectivity, max_step=1,
-                          threshold=1.67, n_jobs=-1000)
+                          n_permutations=1, connectivity=connectivity,
+                          max_step=1, threshold=1.67, n_jobs=-1000)
 
         # not enough TFCE params
         assert_raises(KeyError, spatio_temporal_func, X1d_3,
@@ -290,6 +332,7 @@ def test_cluster_permutation_with_connectivity():
         assert_true(np.min(out_connectivity_6[2]) < 0.05)
 
 
+@slow_test
 def test_permutation_connectivity_equiv():
     """Test cluster level permutations with and without connectivity
     """
@@ -302,76 +345,74 @@ def test_permutation_connectivity_equiv():
         return
     rng = np.random.RandomState(0)
     # subjects, time points, spatial points
-    X = rng.randn(7, 2, 10)
+    n_time = 2
+    n_space = 4
+    X = rng.randn(6, n_time, n_space)
     # add some significant points
-    X[:, 0:2, 0:2] += 10  # span two time points and two spatial points
-    X[:, 1, 5:9] += 10  # span four time points
+    X[:, :, 0:2] += 10  # span two time points and two spatial points
+    X[:, 1, 3] += 20  # span one time point
     max_steps = [1, 1, 1, 2]
     # This will run full algorithm in two ways, then the ST-algorithm in 2 ways
     # All of these should give the same results
-    conns = [None, grid_to_graph(2, 10),
-             grid_to_graph(1, 10), grid_to_graph(1, 10)]
+    conns = [None, grid_to_graph(n_time, n_space),
+             grid_to_graph(1, n_space), grid_to_graph(1, n_space)]
     stat_map = None
-    thresholds = [2, dict(start=0.5, step=0.5)]
-    sig_counts = [2, 8]
+    thresholds = [2, dict(start=1.5, step=1.0)]
+    sig_counts = [2, 5]
     sdps = [0, 0.05, 0.05]
     ots = ['mask', 'mask', 'indices']
+    stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
     for thresh, count in zip(thresholds, sig_counts):
         cs = None
         ps = None
         for max_step, conn in zip(max_steps, conns):
-            for stat_fun in [ttest_1samp_no_p,
-                             partial(ttest_1samp_no_p, sigma=1e-3)]:
-                for sdp, ot in zip(sdps, ots):
-                    t, clusters, p, H0 = \
-                            permutation_cluster_1samp_test(X,
-                                                           threshold=thresh,
-                                                           connectivity=conn,
-                                                           n_jobs=2,
-                                                           max_step=max_step,
-                                                           stat_fun=stat_fun,
-                                                           step_down_p=sdp,
-                                                           out_type=ot)
-                    # make sure our output datatype is correct
-                    if ot == 'mask':
-                        assert_true(isinstance(clusters[0], np.ndarray))
-                        assert_true(clusters[0].dtype == bool)
-                        assert_array_equal(clusters[0].shape, X.shape[1:])
-                    else:  # ot == 'indices'
-                        assert_true(isinstance(clusters[0], tuple))
-
-                    # make sure all comparisons were done; for TFCE, no perm
-                    # should come up empty
-                    if count == 8:
-                        assert_true(not np.any(H0 == 0))
-                    inds = np.where(p < 0.05)[0]
-                    assert_true(len(inds) == count)
-                    this_cs = [clusters[ii] for ii in inds]
-                    this_ps = p[inds]
-                    this_stat_map = np.zeros((2, 10), dtype=bool)
-                    for ci, c in enumerate(this_cs):
-                        if isinstance(c, tuple):
-                            this_c = np.zeros((2, 10), bool)
-                            for x, y in zip(c[0], c[1]):
-                                this_stat_map[x, y] = True
-                                this_c[x, y] = True
-                            this_cs[ci] = this_c
-                            c = this_c
-                        this_stat_map[c] = True
-                    if cs is None:
-                        ps = this_ps
-                        cs = this_cs
-                    if stat_map is None:
-                        stat_map = this_stat_map
-                    assert_array_equal(ps, this_ps)
-                    assert_true(len(cs) == len(this_cs))
-                    for c1, c2 in zip(cs, this_cs):
-                        assert_array_equal(c1, c2)
-                    assert_array_equal(stat_map, this_stat_map)
-
-
+            for sdp, ot in zip(sdps, ots):
+                t, clusters, p, H0 = \
+                    permutation_cluster_1samp_test(
+                        X, threshold=thresh, connectivity=conn, n_jobs=2,
+                        max_step=max_step, stat_fun=stat_fun,
+                        step_down_p=sdp, out_type=ot)
+                # make sure our output datatype is correct
+                if ot == 'mask':
+                    assert_true(isinstance(clusters[0], np.ndarray))
+                    assert_true(clusters[0].dtype == bool)
+                    assert_array_equal(clusters[0].shape, X.shape[1:])
+                else:  # ot == 'indices'
+                    assert_true(isinstance(clusters[0], tuple))
+
+                # make sure all comparisons were done; for TFCE, no perm
+                # should come up empty
+                if isinstance(thresh, dict):  # i.e., the TFCE case
+                    assert_true(not np.any(H0 == 0))
+                inds = np.where(p < 0.05)[0]
+                assert_true(len(inds) == count)
+                this_cs = [clusters[ii] for ii in inds]
+                this_ps = p[inds]
+                this_stat_map = np.zeros((n_time, n_space), dtype=bool)
+                for ci, c in enumerate(this_cs):
+                    if isinstance(c, tuple):
+                        this_c = np.zeros((n_time, n_space), bool)
+                        for x, y in zip(c[0], c[1]):
+                            this_stat_map[x, y] = True
+                            this_c[x, y] = True
+                        this_cs[ci] = this_c
+                        c = this_c
+                    this_stat_map[c] = True
+                if cs is None:
+                    ps = this_ps
+                    cs = this_cs
+                if stat_map is None:
+                    stat_map = this_stat_map
+                assert_array_equal(ps, this_ps)
+                assert_true(len(cs) == len(this_cs))
+                for c1, c2 in zip(cs, this_cs):
+                    assert_array_equal(c1, c2)
+                assert_array_equal(stat_map, this_stat_map)
+
+
+@slow_test
 def spatio_temporal_cluster_test_connectivity():
-    """Test cluster level permutations with and without connectivity
+    """Test spatio-temporal cluster permutations
     """
     try:
         try:
@@ -433,3 +474,6 @@ def test_summarize_clusters():
     assert_true(stc_sum.data.shape[1] == 2)
     clu[2][0] = 0.3
     assert_raises(RuntimeError, summarize_clusters_stc, clu)
+
+
+run_tests_if_main()
diff --git a/mne/stats/tests/test_multi_comp.py b/mne/stats/tests/test_multi_comp.py
index 4cba141..76b2c99 100644
--- a/mne/stats/tests/test_multi_comp.py
+++ b/mne/stats/tests/test_multi_comp.py
@@ -1,5 +1,6 @@
 import numpy as np
-from numpy.testing import assert_almost_equal, assert_allclose, assert_raises
+from numpy.testing import (
+    assert_almost_equal, assert_allclose, assert_raises, assert_array_equal)
 from nose.tools import assert_true
 from scipy import stats
 
@@ -25,6 +26,8 @@ def test_multi_pval_correction():
     assert_true(pval_bonferroni.ndim == 2)
     assert_true(reject_bonferroni.ndim == 2)
     assert_allclose(pval_bonferroni / 10000, pval)
+    reject_expected = pval_bonferroni < alpha
+    assert_array_equal(reject_bonferroni, reject_expected)
 
     fwer = np.mean(reject_bonferroni)
     assert_almost_equal(fwer, alpha, 1)
diff --git a/mne/stats/tests/test_parametric.py b/mne/stats/tests/test_parametric.py
index 9b98ce5..57f184d 100644
--- a/mne/stats/tests/test_parametric.py
+++ b/mne/stats/tests/test_parametric.py
@@ -1,6 +1,6 @@
 from itertools import product
-from ..parametric import (f_twoway_rm, f_threshold_twoway_rm,
-                          defaults_twoway_rm)
+from mne.stats.parametric import (f_mway_rm, f_threshold_mway_rm,
+                                  _map_effects)
 from nose.tools import assert_raises, assert_true
 from numpy.testing import assert_array_almost_equal
 
@@ -15,77 +15,97 @@ test_external = {
     # R 15.2
     # data generated using this code http://goo.gl/7UcKb
     'r_fvals': np.array([2.567619, 0.24006, 1.756380]),
-    'r_pvals_uncorrected': np.array([0.12557, 0.78776, 0.1864])
+    'r_pvals_uncorrected': np.array([0.12557, 0.78776, 0.1864]),
+    # and https://gist.github.com/dengemann/5539403
+    'r_fvals_3way': np.array([
+        0.74783999999999995,   # A
+        0.20895,               # B
+        0.21378,               # A:B
+        0.99404000000000003,   # C
+        0.094039999999999999,  # A:C
+        0.11685,               # B:C
+        2.78749]),              # A:B:C
+    'r_fvals_1way': np.array([0.67571999999999999])
 }
 
-#  generated using this expression: `np.random.RandomState(42).randn(20, 6)`
-test_data = np.array(
-[[0.49671415, -0.1382643, 0.64768854, 1.52302986, -0.23415337, -0.23413696],
- [1.57921282, 0.76743473, -0.46947439, 0.54256004, -0.46341769, -0.46572975],
- [0.24196227, -1.91328024, -1.72491783, -0.56228753, -1.01283112, 0.31424733],
- [-0.90802408, -1.4123037, 1.46564877, -0.2257763, 0.0675282, -1.42474819],
- [-0.54438272, 0.11092259, -1.15099358, 0.37569802, -0.60063869, -0.29169375],
- [-0.60170661, 1.85227818, -0.01349722, -1.05771093, 0.82254491, -1.22084365],
- [0.2088636, -1.95967012, -1.32818605, 0.19686124, 0.73846658, 0.17136828],
- [-0.11564828, -0.3011037, -1.47852199, -0.71984421, -0.46063877, 1.05712223],
- [0.34361829, -1.76304016, 0.32408397, -0.38508228, -0.676922, 0.61167629],
- [1.03099952, 0.93128012, -0.83921752, -0.30921238, 0.33126343, 0.97554513],
- [-0.47917424, -0.18565898, -1.10633497, -1.19620662, 0.81252582, 1.35624003],
- [-0.07201012, 1.0035329, 0.36163603, -0.64511975, 0.36139561, 1.53803657],
- [-0.03582604, 1.56464366, -2.6197451, 0.8219025, 0.08704707, -0.29900735],
- [0.09176078, -1.98756891, -0.21967189, 0.35711257, 1.47789404, -0.51827022],
- [-0.8084936, -0.50175704, 0.91540212, 0.32875111, -0.5297602, 0.51326743],
- [0.09707755, 0.96864499, -0.70205309, -0.32766215, -0.39210815, -1.46351495],
- [0.29612028, 0.26105527, 0.00511346, -0.23458713, -1.41537074, -0.42064532],
- [-0.34271452, -0.80227727, -0.16128571, 0.40405086, 1.8861859, 0.17457781],
- [0.25755039, -0.07444592, -1.91877122, -0.02651388, 0.06023021, 2.46324211],
- [-0.19236096, 0.30154734, -0.03471177, -1.16867804, 1.14282281, 0.75193303]])
+
+def generate_data(n_subjects, n_conditions):
+    """generate testing data"""
+    rng = np.random.RandomState(42)
+    data = rng.randn(n_subjects * n_conditions).reshape(
+        n_subjects, n_conditions)
+    return data
+
+
+def test_map_effects():
+    """ Test ANOVA effects parsing"""
+    selection, names = _map_effects(n_factors=2, effects='A')
+    assert_equal(names, ['A'])
+
+    selection, names = _map_effects(n_factors=2, effects=['A', 'A:B'])
+    assert_equal(names, ['A', 'A:B'])
+
+    selection, names = _map_effects(n_factors=3, effects='A*B')
+    assert_equal(names, ['A', 'B', 'A:B'])
+
+    selection, names = _map_effects(n_factors=3, effects='A*C')
+    assert_equal(names, ['A', 'B', 'A:B', 'C', 'A:C', 'B:C', 'A:B:C'])
+
+    assert_raises(ValueError, _map_effects, n_factors=2, effects='C')
+
+    assert_raises(ValueError, _map_effects, n_factors=27, effects='all')
 
 
 def test_f_twoway_rm():
     """ Test 2-way anova """
-    iter_params = product([4, 10], [2, 15], [4, 6, 8], ['A', 'B', 'A:B'],
-        [False, True])
+    iter_params = product([4, 10], [2, 15], [4, 6, 8],
+                          ['A', 'B', 'A:B'],
+                          [False, True])
+    _effects = {
+        4: [2, 2],
+        6: [2, 3],
+        8: [2, 4]
+    }
     for params in iter_params:
-        n_subj, n_obs, n_levels, picks, correction = params
+        n_subj, n_obs, n_levels, effects, correction = params
         data = np.random.random([n_subj, n_levels, n_obs])
-        effects = {
-            4: [2, 2],
-            6: [2, 3],
-            8: [2, 4]
-        }
-        fvals, pvals = f_twoway_rm(data, effects[n_levels], picks,
-                                      correction=correction)
+        fvals, pvals = f_mway_rm(data, _effects[n_levels], effects,
+                                 correction=correction)
         assert_true((fvals >= 0).all())
         if pvals.any():
             assert_true(((0 <= pvals) & (1 >= pvals)).all())
-        n_effects = len(defaults_twoway_rm['parse'][picks])
+        n_effects = len(_map_effects(n_subj, effects)[0])
         assert_true(fvals.size == n_obs * n_effects)
         if n_effects == 1:  # test for principle of least surprise ...
             assert_true(fvals.ndim == 1)
 
-        fvals_ = f_threshold_twoway_rm(n_subj, effects[n_levels], picks)
+        fvals_ = f_threshold_mway_rm(n_subj, _effects[n_levels], effects)
         assert_true((fvals_ >= 0).all())
         assert_true(fvals_.size == n_effects)
 
     data = np.random.random([n_subj, n_levels, 1])
-    assert_raises(ValueError, f_twoway_rm, data, effects[n_levels],
+    assert_raises(ValueError, f_mway_rm, data, _effects[n_levels],
                   effects='C', correction=correction)
     data = np.random.random([n_subj, n_levels, n_obs, 3])
     # check for dimension handling
-    f_twoway_rm(data, effects[n_levels], picks, correction=correction)
+    f_mway_rm(data, _effects[n_levels], effects, correction=correction)
 
     # now check against external software results
-    fvals, pvals = f_twoway_rm(test_data, [2, 3])
-
-    assert_array_almost_equal(fvals,
-        test_external['spss_fvals'], 3)
-    assert_array_almost_equal(pvals,
-        test_external['spss_pvals_uncorrected'], 3)
-    assert_array_almost_equal(fvals,
-        test_external['r_fvals'], 4)
-    assert_array_almost_equal(pvals,
-        test_external['r_pvals_uncorrected'], 3)
-
-    _, pvals = f_twoway_rm(test_data, [2, 3], correction=True)
+    test_data = generate_data(n_subjects=20, n_conditions=6)
+    fvals, pvals = f_mway_rm(test_data, [2, 3])
+
+    assert_array_almost_equal(fvals, test_external['spss_fvals'], 3)
+    assert_array_almost_equal(pvals, test_external['spss_pvals_uncorrected'],
+                              3)
+    assert_array_almost_equal(fvals, test_external['r_fvals'], 4)
+    assert_array_almost_equal(pvals, test_external['r_pvals_uncorrected'], 3)
+
+    _, pvals = f_mway_rm(test_data, [2, 3], correction=True)
     assert_array_almost_equal(pvals, test_external['spss_pvals_corrected'], 3)
+
+    test_data = generate_data(n_subjects=20, n_conditions=8)
+    fvals, _ = f_mway_rm(test_data, [2, 2, 2])
+    assert_array_almost_equal(fvals, test_external['r_fvals_3way'], 5)
+
+    fvals, _ = f_mway_rm(test_data, [8], 'A')
+    assert_array_almost_equal(fvals, test_external['r_fvals_1way'], 5)
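
For orientation, the renamed M-way API exercised above takes
subjects-by-conditions data plus a list of factor levels. A usage sketch
mirroring the shapes in this test (keyword names assumed from the calls here):

    import numpy as np
    from mne.stats.parametric import f_mway_rm, f_threshold_mway_rm

    rng = np.random.RandomState(42)
    data = rng.randn(20, 6)        # 20 subjects, 2 x 3 = 6 conditions

    # main effects and interaction of a 2 x 3 repeated-measures ANOVA
    fvals, pvals = f_mway_rm(data, factor_levels=[2, 3], effects='A*B')

    # matching F thresholds, e.g. for cluster permutation tests
    f_thresh = f_threshold_mway_rm(n_subjects=20, factor_levels=[2, 3],
                                   effects='A*B', pvalue=0.05)
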
diff --git a/mne/stats/tests/test_regression.py b/mne/stats/tests/test_regression.py
index 6255950..0dccf0f 100644
--- a/mne/stats/tests/test_regression.py
+++ b/mne/stats/tests/test_regression.py
@@ -1,5 +1,6 @@
-# Authors: Teon Brooks <teon at nyu.edu>
+# Authors: Teon Brooks <teon.brooks at gmail.com>
 #          Denis A. Engemann <denis.engemann at gmail.com>
+#          Jona Sassenhagen <jona.sassenhagen at gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -7,27 +8,29 @@ import os.path as op
 import warnings
 
 import numpy as np
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_allclose
+
+from scipy.signal import hann
 
 from nose.tools import assert_raises, assert_true, assert_equal
 
 import mne
 from mne import read_source_estimate
-from mne.datasets import sample
-from mne.stats.regression import linear_regression
+from mne.datasets import testing
+from mne.stats.regression import linear_regression, linear_regression_raw
+from mne.io import RawArray
 
-data_path = sample.data_path(download=False)
-subjects_dir = op.join(data_path, 'subjects')
-stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
+data_path = testing.data_path(download=False)
+stc_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-lh.stc')
+raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw-eve.fif'
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_regression():
     """Test Ordinary Least Squares Regression
     """
-    data_path = sample.data_path()
-    raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
-    event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
     tmin, tmax = -0.2, 0.5
     event_id = dict(aud_l=1, aud_r=2)
 
@@ -65,3 +68,43 @@ def test_regression():
     for k in lm1:
         for v1, v2 in zip(lm1[k], lm2[k]):
             assert_array_equal(v1.data, v2.data)
+
+
+@testing.requires_testing_data
+def test_continuous_regression_no_overlap():
+    """Test regression without overlap correction, on real data"""
+    tmin, tmax = -.1, .5
+
+    raw = mne.io.Raw(raw_fname, preload=True)
+    events = mne.read_events(event_fname)
+    event_id = dict(audio_l=1, audio_r=2)
+
+    raw = raw.pick_channels(raw.ch_names[:2])
+
+    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
+                        baseline=None, reject=None)
+
+    revokeds = linear_regression_raw(raw, events, event_id,
+                                     tmin=tmin, tmax=tmax,
+                                     reject=None)
+
+    for cond in event_id.keys():
+        assert_allclose(revokeds[cond].data,
+                        epochs[cond].average().data)
+
+
+def test_continuous_regression_with_overlap():
+    """Test regression with overlap correction"""
+    signal = np.zeros(100000)
+    times = [1000, 2500, 3000, 5000, 5250, 7000, 7250, 8000]
+    events = np.zeros((len(times), 3), int)
+    events[:, 2] = 1
+    events[:, 0] = times
+    signal[events[:, 0]] = 1.
+    effect = hann(101)
+    signal = np.convolve(signal, effect)[:len(signal)]
+    raw = RawArray(signal[np.newaxis, :], mne.create_info(1, 100, 'eeg'))
+
+    assert_allclose(effect,
+                    linear_regression_raw(raw, events, {1: 1}, tmin=0)[1]
+                    .data.flatten())
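
This overlap test works because linear_regression_raw unmixes overlapping
responses by regression: every sample is modeled as a sum of time-lagged event
regressors, and least squares recovers the kernel. A numpy/scipy-only sketch of
that construction, independent of the mne implementation:

    import numpy as np
    from scipy.signal import hann

    n_samples, n_lags = 10000, 101
    onsets = np.array([1000, 2500, 3000, 5000, 5250])   # overlapping events
    kernel = hann(n_lags)

    signal = np.zeros(n_samples)
    for t in onsets:
        signal[t:t + n_lags] += kernel

    # design matrix: column j is 1 wherever an event occurred j samples ago
    X = np.zeros((n_samples, n_lags))
    for lag in range(n_lags):
        X[onsets + lag, lag] = 1.

    beta = np.linalg.lstsq(X, signal)[0]
    # beta matches the hann kernel despite the overlaps at 2500/3000, 5000/5250
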
diff --git a/mne/surface.py b/mne/surface.py
index e07cd90..8013042 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -4,310 +4,53 @@
 #
 # License: BSD (3-clause)
 
-from .externals.six import string_types
 import os
 from os import path as op
 import sys
 from struct import pack
+from glob import glob
+
 import numpy as np
-from scipy.spatial.distance import cdist
-from scipy import sparse
-from fnmatch import fnmatch
+from scipy.sparse import coo_matrix, csr_matrix, eye as speye
 
+from .bem import read_bem_surfaces
 from .io.constants import FIFF
 from .io.open import fiff_open
 from .io.tree import dir_tree_find
 from .io.tag import find_tag
-from .io.write import (write_int, write_float, write_float_matrix,
-                       write_int_matrix, start_file, end_block,
+from .io.write import (write_int, start_file, end_block,
                        start_block, end_file, write_string,
                        write_float_sparse_rcs)
-from .channels import _get_meg_system
+from .channels.channels import _get_meg_system
 from .transforms import transform_surface_to
 from .utils import logger, verbose, get_subjects_dir
-
-
-##############################################################################
-# BEM
-
-@verbose
-def read_bem_surfaces(fname, add_geom=False, s_id=None, verbose=None):
-    """Read the BEM surfaces from a FIF file
-
-    Parameters
-    ----------
-    fname : string
-        The name of the file containing the surfaces.
-    add_geom : bool, optional (default False)
-        If True add geometry information to the surfaces.
-    s_id : int | None
-        If int, only read and return the surface with the given s_id.
-        An error will be raised if it doesn't exist. If None, all
-        surfaces are read and returned.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Returns
-    -------
-    surf: list | dict
-        A list of dictionaries that each contain a surface. If s_id
-        is not None, only the requested surface will be returned.
-    """
-    #
-    #   Default coordinate frame
-    #
-    coord_frame = FIFF.FIFFV_COORD_MRI
-    #
-    #   Open the file, create directory
-    #
-    fid, tree, _ = fiff_open(fname)
-    #
-    #   Find BEM
-    #
-    bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
-    if bem is None:
-        fid.close()
-        raise ValueError('BEM data not found')
-
-    bem = bem[0]
-    #
-    #   Locate all surfaces
-    #
-    bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
-    if bemsurf is None:
-        fid.close()
-        raise ValueError('BEM surface data not found')
-
-    logger.info('    %d BEM surfaces found' % len(bemsurf))
-    #
-    #   Coordinate frame possibly at the top level
-    #
-    tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
-    if tag is not None:
-        coord_frame = tag.data
-    #
-    #   Read all surfaces
-    #
-    if s_id is not None:
-        surfs = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
-                 for bsurf in bemsurf]
-        surfs = [s for s in surfs if s is not None]
-        if not len(surfs) == 1:
-            raise ValueError('surface with id %d not found' % s_id)
-        fid.close()
-        return surfs[0]
-
-    surf = []
-    for bsurf in bemsurf:
-        logger.info('    Reading a surface...')
-        this = _read_bem_surface(fid, bsurf, coord_frame)
-        logger.info('[done]')
-        if add_geom:
-            _complete_surface_info(this)
-        surf.append(this)
-
-    logger.info('    %d BEM surfaces read' % len(surf))
-
-    fid.close()
-
-    return surf
-
-
-def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
-    """Read one bem surface
-    """
-    res = dict()
-    #
-    #   Read all the interesting stuff
-    #
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
-
-    if tag is None:
-        res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
-    else:
-        res['id'] = int(tag.data)
-
-    if s_id is not None:
-        if res['id'] != s_id:
-            return None
-
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
-    if tag is None:
-        res['sigma'] = 1.0
-    else:
-        res['sigma'] = float(tag.data)
-
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
-    if tag is None:
-        fid.close()
-        raise ValueError('Number of vertices not found')
-
-    res['np'] = int(tag.data)
-
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
-    if tag is None:
-        fid.close()
-        raise ValueError('Number of triangles not found')
-    else:
-        res['ntri'] = int(tag.data)
-
-    tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
-    if tag is None:
-        tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
-        if tag is None:
-            res['coord_frame'] = def_coord_frame
-        else:
-            res['coord_frame'] = tag.data
-    else:
-        res['coord_frame'] = tag.data
-    #
-    #   Vertices, normals, and triangles
-    #
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
-    if tag is None:
-        fid.close()
-        raise ValueError('Vertex data not found')
-
-    res['rr'] = tag.data.astype(np.float)  # XXX : double because of mayavi bug
-    if res['rr'].shape[0] != res['np']:
-        fid.close()
-        raise ValueError('Vertex information is incorrect')
-
-    tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
-    if tag is None:
-        tag = tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
-    if tag is None:
-        res['nn'] = []
-    else:
-        res['nn'] = tag.data
-        if res['nn'].shape[0] != res['np']:
-            fid.close()
-            raise ValueError('Vertex normal information is incorrect')
-
-    tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
-    if tag is None:
-        fid.close()
-        raise ValueError('Triangulation not found')
-
-    res['tris'] = tag.data - 1  # index start at 0 in Python
-    if res['tris'].shape[0] != res['ntri']:
-        fid.close()
-        raise ValueError('Triangulation information is incorrect')
-
-    return res
-
-
-@verbose
-def read_bem_solution(fname, verbose=None):
-    """Read the BEM solution from a file
-
-    Parameters
-    ----------
-    fname : string
-        The file containing the BEM solution.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-
-    Returns
-    -------
-    bem : dict
-        The BEM solution.
-    """
-    logger.info('Loading surfaces...')
-    bem_surfs = read_bem_surfaces(fname, add_geom=True, verbose=False)
-    if len(bem_surfs) == 3:
-        logger.info('Three-layer model surfaces loaded.')
-        needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
-                           FIFF.FIFFV_BEM_SURF_ID_SKULL,
-                           FIFF.FIFFV_BEM_SURF_ID_BRAIN])
-        if not all([x['id'] in needed for x in bem_surfs]):
-            raise RuntimeError('Could not find necessary BEM surfaces')
-        # reorder surfaces as necessary (shouldn't need to?)
-        reorder = [None] * 3
-        for x in bem_surfs:
-            reorder[np.where(x['id'] == needed)[0][0]] = x
-        bem_surfs = reorder
-    elif len(bem_surfs) == 1:
-        if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
-            raise RuntimeError('BEM Surfaces not found')
-        logger.info('Homogeneous model surface loaded.')
-
-    # convert from surfaces to solution
-    bem = dict(surfs=bem_surfs)
-    logger.info('\nLoading the solution matrix...\n')
-    f, tree, _ = fiff_open(fname)
-    with f as fid:
-        # Find the BEM data
-        nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
-        if len(nodes) == 0:
-            raise RuntimeError('No BEM data in %s' % fname)
-        bem_node = nodes[0]
-
-        # Approximation method
-        tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)
-        method = tag.data[0]
-        if method == FIFF.FIFFV_BEM_APPROX_CONST:
-            method = 'constant collocation'
-        elif method == FIFF.FIFFV_BEM_APPROX_LINEAR:
-            method = 'linear collocation'
-        else:
-            raise RuntimeError('Cannot handle BEM approximation method : %d'
-                               % method)
-
-        tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
-        dims = tag.data.shape
-        if len(dims) != 2:
-            raise RuntimeError('Expected a two-dimensional solution matrix '
-                               'instead of a %d dimensional one' % dims[0])
-
-        dim = 0
-        for surf in bem['surfs']:
-            if method == 'linear collocation':
-                dim += surf['np']
-            else:
-                dim += surf['ntri']
-
-        if dims[0] != dim or dims[1] != dim:
-            raise RuntimeError('Expected a %d x %d solution matrix instead of '
-                               'a %d x %d one' % (dim, dim, dims[1], dims[0]))
-        sol = tag.data
-        nsol = dims[0]
-
-    # Gamma factors and multipliers
-    bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
-    # Dirty trick for the zero conductivity outside
-    sigma = np.r_[0.0, bem['sigma']]
-    bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
-    bem['field_mult'] = sigma[1:] - sigma[:-1]
-    # make sure subsequent "zip"s work correctly
-    assert len(bem['surfs']) == len(bem['field_mult'])
-    bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
-                    (sigma[1:] + sigma[:-1])[:, np.newaxis])
-    bem['sol_name'] = fname
-    bem['solution'] = sol
-    bem['nsol'] = nsol
-    bem['bem_method'] = method
-    logger.info('Loaded %s BEM solution from %s', bem['bem_method'], fname)
-    return bem
+from .externals.six import string_types
 
 
 ###############################################################################
 # AUTOMATED SURFACE FINDING
 
-def get_head_surf(subject, source='bem', subjects_dir=None):
+@verbose
+def get_head_surf(subject, source=('bem', 'head'), subjects_dir=None,
+                  verbose=None):
     """Load the subject head surface
 
     Parameters
     ----------
     subject : str
         Subject name.
-    source : str
+    source : str | list of str
         Type to load. Common choices would be `'bem'` or `'head'`. We first
         try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
-        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory.
+        then look for `'$SUBJECT*$SOURCE.fif'` in the same directory by going
+        through all files matching the pattern. The head surface will be read
+        from the first file containing a head surface. Can also be a list
+        to try multiple strings.
     subjects_dir : str, or None
         Path to the SUBJECTS_DIR. If None, the path is obtained by using
         the environment variable SUBJECTS_DIR.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -317,29 +60,45 @@ def get_head_surf(subject, source='bem', subjects_dir=None):
     # Load the head surface from the BEM
     subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
     # use realpath to allow for linked surfaces (c.f. MNE manual 196-197)
-    this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
-                                    '%s-%s.fif' % (subject, source)))
-    if not op.isfile(this_head):
-        # let's do a more sophisticated search
-        this_head = None
-        path = op.join(subjects_dir, subject, 'bem')
-        if not op.isdir(path):
-            raise IOError('Subject bem directory "%s" does not exist'
-                          % path)
-        files = os.listdir(path)
-        for fname in files:
-            if fnmatch(fname, '%s*%s.fif' % (subject, source)):
-                this_head = op.join(path, fname)
-                break
-        if this_head is None:
-            raise IOError('No file matching "%s*%s" found'
-                          % (subject, source))
-    surf = read_bem_surfaces(this_head, True,
-                             FIFF.FIFFV_BEM_SURF_ID_HEAD)
+    if isinstance(source, string_types):
+        source = [source]
+    surf = None
+    for this_source in source:
+        this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
+                                        '%s-%s.fif' % (subject, this_source)))
+        if op.exists(this_head):
+            surf = read_bem_surfaces(this_head, True,
+                                     FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                                     verbose=False)
+        else:
+            # let's do a more sophisticated search
+            path = op.join(subjects_dir, subject, 'bem')
+            if not op.isdir(path):
+                raise IOError('Subject bem directory "%s" does not exist'
+                              % path)
+            files = sorted(glob(op.join(path, '%s*%s.fif'
+                                        % (subject, this_source))))
+            for this_head in files:
+                try:
+                    surf = read_bem_surfaces(this_head, True,
+                                             FIFF.FIFFV_BEM_SURF_ID_HEAD,
+                                             verbose=False)
+                except ValueError:
+                    pass
+                else:
+                    break
+        if surf is not None:
+            break
+
+    if surf is None:
+        raise IOError('No file matching "%s*%s" and containing a head '
+                      'surface found' % (subject, this_source))
+    logger.info('Using surface from %s' % this_head)
     return surf
 
 
-def get_meg_helmet_surf(info, trans=None):
+@verbose
+def get_meg_helmet_surf(info, trans=None, verbose=None):
     """Load the MEG helmet associated with the MEG sensors
 
     Parameters
@@ -350,6 +109,8 @@ def get_meg_helmet_surf(info, trans=None):
         The head<->MRI transformation, usually obtained using
         read_trans(). Can be None, in which case the surface will
         be in head coordinates instead of MRI coordinates.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -357,9 +118,11 @@ def get_meg_helmet_surf(info, trans=None):
         The MEG helmet as a surface.
     """
     system = _get_meg_system(info)
+    logger.info('Getting helmet for system %s' % system)
     fname = op.join(op.split(__file__)[0], 'data', 'helmets',
                     system + '.fif.gz')
-    surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET)
+    surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET,
+                             verbose=False)
 
     # Ignore what the file says, it's in device coords and we want MRI coords
     surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
@@ -409,6 +172,13 @@ def fast_cross_3d(x, y):
         return np.cross(x, y)
 
 
+def _fast_cross_nd_sum(a, b, c):
+    """Fast cross and sum"""
+    return ((a[..., 1] * b[..., 2] - a[..., 2] * b[..., 1]) * c[..., 0] +
+            (a[..., 2] * b[..., 0] - a[..., 0] * b[..., 2]) * c[..., 1] +
+            (a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]) * c[..., 2])
+
+
 def _accumulate_normals(tris, tri_nn, npts):
     """Efficiently accumulate triangle normals"""
     # this code replaces the following, but is faster (vectorized):
@@ -468,7 +238,8 @@ def _triangle_coords(r, geom, best):
     return x, y, z
 
 
-def _complete_surface_info(this, do_neighbor_vert=False):
+@verbose
+def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
     """Complete surface info"""
     # based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
 
@@ -509,6 +280,7 @@ def _complete_surface_info(this, do_neighbor_vert=False):
 
     #   Determine the neighboring vertices and fix errors
     if do_neighbor_vert is True:
+        logger.info('    Vertex neighbors...')
         this['neighbor_vert'] = [_get_surf_neighbors(this, k)
                                  for k in range(this['np'])]
 
@@ -517,11 +289,9 @@ def _complete_surface_info(this, do_neighbor_vert=False):
 
 def _get_surf_neighbors(surf, k):
     """Calculate the surface neighbors based on triangulation"""
-    verts = np.concatenate([surf['tris'][nt]
-                            for nt in surf['neighbor_tri'][k]])
+    verts = surf['tris'][surf['neighbor_tri'][k]]
     verts = np.setdiff1d(verts, [k], assume_unique=False)
-    if np.any(verts >= surf['np']):
-        raise RuntimeError
+    assert np.all(verts < surf['np'])
     nneighbors = len(verts)
     nneigh_max = len(surf['neighbor_tri'][k])
     if nneighbors > nneigh_max:
@@ -555,11 +325,15 @@ def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
     use_balltree : bool
         Use fast BallTree based search from scikit-learn. If scikit-learn
         is not installed it will fall back to the slow brute force search.
+    return_dists : bool
+        If True, return associated distances.
 
     Returns
     -------
     nearest : array, shape=(n_query,)
         Index of nearest neighbor in xhs for every point in rr.
+    distances : array, shape=(n_query,)
+        The distances. Only returned if return_dists is True.
     """
     if use_balltree:
         try:
@@ -569,6 +343,10 @@ def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
                         'faster if scikit-learn is installed.')
             use_balltree = False
 
+    if xhs.size == 0 or rr.size == 0:
+        if return_dists:
+            return np.array([], int), np.array([])
+        return np.array([], int)
     if use_balltree is True:
         ball_tree = BallTree(xhs)
         if return_dists:
@@ -578,6 +356,7 @@ def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
             nearest = ball_tree.query(rr, k=1, return_distance=False)[:, 0]
             return nearest
     else:
+        from scipy.spatial.distance import cdist
         if return_dists:
             nearest = list()
             dists = list()
@@ -642,47 +421,43 @@ def read_surface(fname, verbose=None):
     tris : int array, shape=(n_faces, 3)
         Triangulation (each line contains indexes for three points which
         together form a face).
+
+    See Also
+    --------
+    write_surface
     """
     TRIANGLE_MAGIC = 16777214
     QUAD_MAGIC = 16777215
     NEW_QUAD_MAGIC = 16777213
     with open(fname, "rb", buffering=0) as fobj:  # buffering=0 for np bug
         magic = _fread3(fobj)
-        if (magic == QUAD_MAGIC) or (magic == NEW_QUAD_MAGIC):  # Quad file or new quad
+        # Quad file or new quad
+        if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):
             create_stamp = ''
             nvert = _fread3(fobj)
             nquad = _fread3(fobj)
-            if magic == QUAD_MAGIC:
-                coords = np.fromfile(fobj, ">i2", nvert * 3).astype(np.float) / 100.
-            else:
-                coords = np.fromfile(fobj, ">f4", nvert * 3).astype(np.float)
-
+            (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
+            coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div
             coords = coords.reshape(-1, 3)
             quads = _fread3_many(fobj, nquad * 4)
             quads = quads.reshape(nquad, 4)
-            #
-            #   Face splitting follows
-            #
+
+            # Face splitting follows
             faces = np.zeros((2 * nquad, 3), dtype=np.int)
             nface = 0
             for quad in quads:
                 if (quad[0] % 2) == 0:
-                    faces[nface] = quad[0], quad[1], quad[3]
-                    nface += 1
-                    faces[nface] = quad[2], quad[3], quad[1]
-                    nface += 1
+                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[3]],
+                                              [quad[2], quad[3], quad[1]]]
                 else:
-                    faces[nface] = quad[0], quad[1], quad[2]
-                    nface += 1
-                    faces[nface] = quad[0], quad[2], quad[3]
-                    nface += 1
-
+                    faces[nface:nface + 2] = [[quad[0], quad[1], quad[2]],
+                                              [quad[0], quad[2], quad[3]]]
+                nface += 2
         elif magic == TRIANGLE_MAGIC:  # Triangle file
             create_stamp = fobj.readline()
-            _ = fobj.readline()  # analysis:ignore
+            fobj.readline()
             vnum = np.fromfile(fobj, ">i4", 1)[0]
             fnum = np.fromfile(fobj, ">i4", 1)[0]
-            #raise RuntimeError
             coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
             faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
         else:
@@ -696,7 +471,7 @@ def read_surface(fname, verbose=None):
 
 
 @verbose
-def _read_surface_geom(fname, add_geom=True, norm_rr=False, verbose=None):
+def _read_surface_geom(fname, patch_stats=True, norm_rr=False, verbose=None):
     """Load the surface as dict, optionally add the geometry information"""
     # based on mne_load_surface_geom() in mne_surface_io.c
     if isinstance(fname, string_types):
@@ -709,7 +484,7 @@ def _read_surface_geom(fname, add_geom=True, norm_rr=False, verbose=None):
         s = fname
     else:
         raise RuntimeError('fname cannot be understood as str or dict')
-    if add_geom is True:
+    if patch_stats is True:
         s = _complete_surface_info(s)
     if norm_rr is True:
         _normalize_vectors(s['rr'])
@@ -719,13 +494,14 @@ def _read_surface_geom(fname, add_geom=True, norm_rr=False, verbose=None):
 ##############################################################################
 # SURFACE CREATION
 
-def _get_ico_surface(grade):
+def _get_ico_surface(grade, patch_stats=False):
     """Return an icosahedral surface of the desired grade"""
     # always use verbose=False since users don't need to know we're pulling
     # these from a file
     ico_file_name = op.join(op.dirname(__file__), 'data',
                             'icos.fif.gz')
-    ico = read_bem_surfaces(ico_file_name, s_id=9000 + grade, verbose=False)
+    ico = read_bem_surfaces(ico_file_name, patch_stats, s_id=9000 + grade,
+                            verbose=False)
     return ico
 
 
@@ -783,7 +559,7 @@ def _tessellate_sphere(mylevel):
                 /\           Normalize a, b, c
                /  \
              b/____\c        Construct new triangles
-             /\    /\	       [0,b,a]
+             /\    /\        [0,b,a]
             /  \  /  \       [b,1,c]
            /____\/____\      [a,b,c]
           0     a      2     [a,c,2]
@@ -833,10 +609,11 @@ def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
     surf = _read_surface_geom(surf)
 
     if stype in ['ico', 'oct']:
-        ### from mne_ico_downsample.c ###
+        # ## from mne_ico_downsample.c ## #
         surf_name = op.join(subjects_dir, subject, 'surf', hemi + '.sphere')
         logger.info('Loading geometry from %s...' % surf_name)
-        from_surf = _read_surface_geom(surf_name, norm_rr=True, add_geom=False)
+        from_surf = _read_surface_geom(surf_name, norm_rr=True,
+                                       patch_stats=False)
         if not len(from_surf['rr']) == surf['np']:
             raise RuntimeError('Mismatch between number of surface vertices, '
                                'possible parcellation error?')
@@ -909,6 +686,10 @@ def write_surface(fname, coords, faces, create_stamp=''):
     create_stamp : str
         Comment that is written to the beginning of the file. Can not contain
         line breaks.
+
+    See Also
+    --------
+    read_surface
     """
     if len(create_stamp.splitlines()) > 1:
         raise ValueError("create_stamp can only contain one line")
@@ -926,43 +707,7 @@ def write_surface(fname, coords, faces, create_stamp=''):
 
 
 ###############################################################################
-# Write
-
-def write_bem_surface(fname, surf):
-    """Write one bem surface
-
-    Parameters
-    ----------
-    fname : string
-        File to write
-    surf : dict
-        A surface structured as obtained with read_bem_surfaces
-    """
-
-    # Create the file and save the essentials
-    fid = start_file(fname)
-
-    start_block(fid, FIFF.FIFFB_BEM)
-    start_block(fid, FIFF.FIFFB_BEM_SURF)
-
-    write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
-    write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
-    write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
-    write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
-    write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surf['coord_frame'])
-    write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
-
-    if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
-        write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, surf['nn'])
-
-    # index start at 0 in Python
-    write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, surf['tris'] + 1)
-
-    end_block(fid, FIFF.FIFFB_BEM_SURF)
-    end_block(fid, FIFF.FIFFB_BEM)
-
-    end_file(fid)
-
+# Decimation
 
 def _decimate_surface(points, triangles, reduction):
     """Aux function"""
@@ -1051,7 +796,7 @@ def read_morph_map(subject_from, subject_to, subjects_dir=None,
     if not op.isdir(mmap_dir):
         try:
             os.mkdir(mmap_dir)
-        except:
+        except Exception:
             logger.warning('Could not find or make morph map directory "%s"'
                            % mmap_dir)
 
@@ -1104,11 +849,8 @@ def read_morph_map(subject_from, subject_to, subjects_dir=None,
                         right_map = tag.data
                         logger.info('    Right-hemisphere map read.')
 
-    if left_map is None:
-        raise ValueError('Left hemisphere map not found in %s' % fname)
-
-    if right_map is None:
-        raise ValueError('Left hemisphere map not found in %s' % fname)
+    if left_map is None or right_map is None:
+        raise ValueError('Could not find both hemispheres in %s' % fname)
 
     return left_map, right_map
 
@@ -1183,7 +925,7 @@ def _make_morph_map(subject_from, subject_to, subjects_dir=None):
                             '%s.sphere.reg' % hemi)
             from_pts = read_surface(fname, verbose=False)[0]
             n_pts = len(from_pts)
-            morph_maps.append(sparse.eye(n_pts, n_pts, format='csr'))
+            morph_maps.append(speye(n_pts, n_pts, format='csr'))
         return morph_maps
 
     for hemi in ['lh', 'rh']:
@@ -1216,9 +958,8 @@ def _make_morph_map(subject_from, subject_to, subjects_dir=None):
 
         nn_tris = from_tris[nn_tri_inds]
         row_ind = np.repeat(np.arange(n_to_pts), 3)
-        this_map = sparse.csr_matrix((nn_tris_weights,
-                                     (row_ind, nn_tris.ravel())),
-                                     shape=(n_to_pts, n_from_pts))
+        this_map = csr_matrix((nn_tris_weights, (row_ind, nn_tris.ravel())),
+                              shape=(n_to_pts, n_from_pts))
         morph_maps.append(this_map)
 
     return morph_maps
@@ -1295,14 +1036,13 @@ def _nearest_tri_edge(pt_tris, to_pt, pqs, dist, tri_geom):
                                0.0), 1.0)
     q0 = np.zeros_like(p0)
     #   Side 2 -> 3
-    t1 = (0.5 * ((2.0 * aa - cc) * (1.0 - pp)
-                 + (2.0 * bb - cc) * qq) / (aa + bb - cc))
+    t1 = (0.5 * ((2.0 * aa - cc) * (1.0 - pp) +
+                 (2.0 * bb - cc) * qq) / (aa + bb - cc))
     t1 = np.minimum(np.maximum(t1, 0.0), 1.0)
     p1 = 1.0 - t1
     q1 = t1
     #   Side 1 -> 3
-    q2 = np.minimum(np.maximum(qq + 0.5 * (pp * cc)
-                               / bb, 0.0), 1.0)
+    q2 = np.minimum(np.maximum(qq + 0.5 * (pp * cc) / bb, 0.0), 1.0)
     p2 = np.zeros_like(q2)
 
     # figure out which one had the lowest distance
@@ -1315,3 +1055,59 @@ def _nearest_tri_edge(pt_tris, to_pt, pqs, dist, tri_geom):
     ii = np.argmin(np.abs(dists))
     p, q, pt, dist = pp[ii], qq[ii], pt_tris[ii % len(pt_tris)], dists[ii]
     return p, q, pt, dist
+
+
+def mesh_edges(tris):
+    """Returns sparse matrix with edges as an adjacency matrix
+
+    Parameters
+    ----------
+    tris : array, shape (n_triangles, 3)
+        The triangles.
+
+    Returns
+    -------
+    edges : sparse matrix
+        The adjacency matrix.
+    """
+    if np.max(tris) > len(np.unique(tris)):
+        raise ValueError('Cannot compute connectivity on a selection of '
+                         'triangles.')
+
+    npoints = np.max(tris) + 1
+    ones_ntris = np.ones(3 * len(tris))
+
+    a, b, c = tris.T
+    x = np.concatenate((a, b, c))
+    y = np.concatenate((b, c, a))
+    edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
+    edges = edges.tocsr()
+    edges = edges + edges.T
+    return edges
+
+
+def mesh_dist(tris, vert):
+    """Compute adjacency matrix weighted by distances
+
+    It generates an adjacency matrix where the entries are the distances
+    between neighboring vertices.
+
+    Parameters
+    ----------
+    tris : array, shape (n_tris, 3)
+        Mesh triangulation.
+    vert : array, shape (n_vert, 3)
+        Vertex locations.
+
+    Returns
+    -------
+    dist_matrix : scipy.sparse.csr_matrix
+        Sparse matrix with distances between adjacent vertices
+    """
+    edges = mesh_edges(tris).tocoo()
+
+    # Euclidean distances between neighboring vertices
+    dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
+                          axis=1))
+    dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
+    return dist_matrix
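
The two mesh helpers moved into mne/surface.py above are easiest to see on a
toy mesh; a small usage sketch with two triangles forming a unit square:

    import numpy as np
    from mne.surface import mesh_edges, mesh_dist

    tris = np.array([[0, 1, 2],        # square split along the 0-2 diagonal
                     [0, 2, 3]])
    vert = np.array([[0., 0., 0.],
                     [1., 0., 0.],
                     [1., 1., 0.],
                     [0., 1., 0.]])

    adjacency = mesh_edges(tris)       # nonzero where vertices share an edge
    dist = mesh_dist(tris, vert)       # same pattern, Euclidean edge lengths
    print(dist[0, 2])                  # diagonal edge: sqrt(2)
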
diff --git a/mne/tests/__init__.py b/mne/tests/__init__.py
index bf0f249..e69de29 100644
--- a/mne/tests/__init__.py
+++ b/mne/tests/__init__.py
@@ -1,3 +0,0 @@
-# need this for forward/test_make_forward.py
-from . import test_source_space
-
diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py
new file mode 100644
index 0000000..dee1b83
--- /dev/null
+++ b/mne/tests/test_bem.py
@@ -0,0 +1,264 @@
+# Authors: Marijn van Vliet <w.m.vanvliet at gmail.com>
+#
+# License: BSD 3 clause
+
+import os.path as op
+import numpy as np
+from nose.tools import assert_raises, assert_true
+from numpy.testing import assert_equal, assert_allclose
+
+from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces,
+                 make_bem_solution, read_bem_solution, write_bem_solution,
+                 make_sphere_model, Transform)
+from mne.preprocessing.maxfilter import fit_sphere_to_headshape
+from mne.io.constants import FIFF
+from mne.transforms import translation
+from mne.datasets import testing
+from mne.utils import run_tests_if_main, _TempDir, slow_test
+from mne.bem import (_ico_downsample, _get_ico_map, _order_surfaces,
+                     _assert_complete_surface, _assert_inside,
+                     _check_surface_size, _bem_find_surface)
+from mne.io import read_info
+
+fname_raw = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+                    'test_raw.fif')
+subjects_dir = op.join(testing.data_path(download=False), 'subjects')
+fname_bem_3 = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-320-320-320-bem.fif')
+fname_bem_1 = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-320-bem.fif')
+fname_bem_sol_3 = op.join(subjects_dir, 'sample', 'bem',
+                          'sample-320-320-320-bem-sol.fif')
+fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem',
+                          'sample-320-bem-sol.fif')
+
+
+def _compare_bem_surfaces(surfs_1, surfs_2):
+    """Helper to compare BEM surfaces"""
+    names = ['id', 'nn', 'rr', 'coord_frame', 'tris', 'sigma', 'ntri', 'np']
+    ignores = ['tri_cent', 'tri_nn', 'tri_area', 'neighbor_tri']
+    for s0, s1 in zip(surfs_1, surfs_2):
+        assert_equal(set(names), set(s0.keys()) - set(ignores))
+        assert_equal(set(names), set(s1.keys()) - set(ignores))
+        for name in names:
+            assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-6,
+                            err_msg='Mismatch: "%s"' % name)
+
+
+def _compare_bem_solutions(sol_a, sol_b):
+    """Helper to compare BEM solutions"""
+    # compare the surfaces we used
+    _compare_bem_surfaces(sol_a['surfs'], sol_b['surfs'])
+    # compare the actual solutions
+    names = ['bem_method', 'field_mult', 'gamma', 'is_sphere',
+             'nsol', 'sigma', 'source_mult', 'solution']
+    assert_equal(set(sol_a.keys()), set(sol_b.keys()))
+    assert_equal(set(names + ['surfs']), set(sol_b.keys()))
+    for key in names:
+        assert_allclose(sol_a[key], sol_b[key], rtol=1e-3, atol=1e-5,
+                        err_msg='Mismatch: %s' % key)
+
+
+@testing.requires_testing_data
+def test_io_bem():
+    """Test reading and writing of bem surfaces and solutions
+    """
+    tempdir = _TempDir()
+    temp_bem = op.join(tempdir, 'temp-bem.fif')
+    assert_raises(ValueError, read_bem_surfaces, fname_raw)
+    assert_raises(ValueError, read_bem_surfaces, fname_bem_3, s_id=10)
+    surf = read_bem_surfaces(fname_bem_3, patch_stats=True)  # smoke test
+    surf = read_bem_surfaces(fname_bem_3, patch_stats=False)
+    write_bem_surfaces(temp_bem, surf[0])
+    surf_read = read_bem_surfaces(temp_bem, patch_stats=False)
+    _compare_bem_surfaces(surf, surf_read)
+
+    assert_raises(RuntimeError, read_bem_solution, fname_bem_3)
+    temp_sol = op.join(tempdir, 'temp-sol.fif')
+    sol = read_bem_solution(fname_bem_sol_3)
+    assert_true('BEM' in repr(sol))
+    write_bem_solution(temp_sol, sol)
+    sol_read = read_bem_solution(temp_sol)
+    _compare_bem_solutions(sol, sol_read)
+    sol = read_bem_solution(fname_bem_sol_1)
+    assert_raises(RuntimeError, _bem_find_surface, sol, 3)
+
+
+def test_make_sphere_model():
+    """Test making a sphere model"""
+    info = read_info(fname_raw)
+    assert_raises(ValueError, make_sphere_model, 'foo', 'auto', info)
+    assert_raises(ValueError, make_sphere_model, 'auto', 'auto', None)
+    # here we just make sure it works -- the functionality is actually
+    # tested more extensively e.g. in the forward and dipole code
+    bem = make_sphere_model('auto', 'auto', info)
+    assert_true('3 layers' in repr(bem))
+    assert_true('Sphere ' in repr(bem))
+    assert_true(' mm' in repr(bem))
+    bem = make_sphere_model('auto', None, info)
+    assert_true('no layers' in repr(bem))
+    assert_true('Sphere ' in repr(bem))
+
+
+@testing.requires_testing_data
+def test_bem_model():
+    """Test BEM model creation from Python with I/O"""
+    tempdir = _TempDir()
+    fname_temp = op.join(tempdir, 'temp-bem.fif')
+    for kwargs, fname in zip((dict(), dict(conductivity=[0.3])),
+                             [fname_bem_3, fname_bem_1]):
+        model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir,
+                               **kwargs)
+        model_c = read_bem_surfaces(fname)
+        _compare_bem_surfaces(model, model_c)
+        write_bem_surfaces(fname_temp, model)
+        model_read = read_bem_surfaces(fname_temp)
+        _compare_bem_surfaces(model, model_c)
+        _compare_bem_surfaces(model_read, model_c)
+    assert_raises(ValueError, make_bem_model, 'sample',  # bad conductivity
+                  conductivity=[0.3, 0.006], subjects_dir=subjects_dir)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_bem_solution():
+    """Test making a BEM solution from Python with I/O"""
+    # test degenerate conditions
+    surf = read_bem_surfaces(fname_bem_1)[0]
+    assert_raises(RuntimeError, _ico_downsample, surf, 10)  # bad dec grade
+    s_bad = dict(tris=surf['tris'][1:], ntri=surf['ntri'] - 1, rr=surf['rr'])
+    assert_raises(RuntimeError, _ico_downsample, s_bad, 1)  # not isomorphic
+    s_bad = dict(tris=surf['tris'].copy(), ntri=surf['ntri'],
+                 rr=surf['rr'])  # bad triangulation
+    s_bad['tris'][0] = [0, 0, 0]
+    assert_raises(RuntimeError, _ico_downsample, s_bad, 1)
+    s_bad['id'] = 1
+    assert_raises(RuntimeError, _assert_complete_surface, s_bad)
+    s_bad = dict(tris=surf['tris'], ntri=surf['ntri'], rr=surf['rr'].copy())
+    s_bad['rr'][0] = 0.
+    assert_raises(RuntimeError, _get_ico_map, surf, s_bad)
+
+    surfs = read_bem_surfaces(fname_bem_3)
+    assert_raises(RuntimeError, _assert_inside, surfs[0], surfs[1])  # outside
+    surfs[0]['id'] = 100  # bad surfs
+    assert_raises(RuntimeError, _order_surfaces, surfs)
+    surfs[1]['rr'] /= 1000.
+    assert_raises(RuntimeError, _check_surface_size, surfs[1])
+
+    # actually test functionality
+    tempdir = _TempDir()
+    fname_temp = op.join(tempdir, 'temp-bem-sol.fif')
+    # use a model and solution made in Python
+    conductivities = [(0.3,), (0.3, 0.006, 0.3)]
+    fnames = [fname_bem_sol_1, fname_bem_sol_3]
+    for cond, fname in zip(conductivities, fnames):
+        for model_type in ('python', 'c'):
+            if model_type == 'python':
+                model = make_bem_model('sample', conductivity=cond, ico=2,
+                                       subjects_dir=subjects_dir)
+            else:
+                model = fname_bem_1 if len(cond) == 1 else fname_bem_3
+        solution = make_bem_solution(model)
+        solution_c = read_bem_solution(fname)
+        _compare_bem_solutions(solution, solution_c)
+        write_bem_solution(fname_temp, solution)
+        solution_read = read_bem_solution(fname_temp)
+        _compare_bem_solutions(solution, solution_c)
+        _compare_bem_solutions(solution_read, solution_c)
+
+
+def test_fit_sphere_to_headshape():
+    """Test fitting a sphere to digitization points"""
+    # Create points of various kinds
+    rad = 90.  # mm
+    center = np.array([0.5, -10., 40.])  # mm
+    dev_trans = np.array([0., -0.005, -10.])
+    dev_center = center - dev_trans
+    dig = [
+        # Left auricular
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_LPA,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([-1.0, 0.0, 0.0])},
+        # Nasion
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_NASION,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([0.0, 1.0, 0.0])},
+        # Right auricular
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'ident': FIFF.FIFFV_POINT_RPA,
+         'kind': FIFF.FIFFV_POINT_CARDINAL,
+         'r': np.array([1.0, 0.0, 0.0])},
+
+        # Top of the head (extra point)
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EXTRA,
+         'r': np.array([0.0, 0.0, 1.0])},
+
+        # EEG points
+        # Fz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0, .72, .69])},
+        # F3
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([-.55, .67, .50])},
+        # F4
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([.55, .67, .50])},
+        # Cz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0.0, 0.0, 1.0])},
+        # Pz
+        {'coord_frame': FIFF.FIFFV_COORD_HEAD,
+         'kind': FIFF.FIFFV_POINT_EEG,
+         'r': np.array([0, -.72, .69])},
+    ]
+    for d in dig:
+        d['r'] *= rad / 1000.
+        d['r'] += center / 1000.
+
+    # Device to head transformation (a pure translation here)
+    dev_head_t = Transform('meg', 'head', translation(*(dev_trans / 1000.)))
+
+    info = {'dig': dig, 'dev_head_t': dev_head_t}
+
+    # Degenerate conditions
+    assert_raises(ValueError, fit_sphere_to_headshape, info,
+                  dig_kinds=(FIFF.FIFFV_POINT_HPI,))
+    info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
+    assert_raises(RuntimeError, fit_sphere_to_headshape, info)
+    info['dig'][0]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
+
+    # Test with 4 points that match a perfect sphere
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL, FIFF.FIFFV_POINT_EXTRA)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    kwargs = dict(rtol=1e-3, atol=1e-2)  # in mm
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, dev_center, **kwargs)
+
+    # Test with cardinal and extra points
+    dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
+                 FIFF.FIFFV_POINT_EXTRA)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, dev_center, **kwargs)
+
+    # Test with some noisy EEG points only.
+    dig_kinds = (FIFF.FIFFV_POINT_EEG,)
+    r, oh, od = fit_sphere_to_headshape(info, dig_kinds=dig_kinds)
+    kwargs = dict(rtol=1e-3, atol=10.)  # in mm
+    assert_allclose(r, rad, **kwargs)
+    assert_allclose(oh, center, **kwargs)
+    assert_allclose(od, center, **kwargs)
+
+    dig = [dict(coord_frame=FIFF.FIFFV_COORD_DEVICE)]
+
+
+run_tests_if_main()
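
The head-shape test above places exact and near-exact digitization points on a
90 mm sphere and checks the recovered radius and center. One standard way to
perform such a fit is the algebraic least-squares formulation sketched below;
this is for reference only and is not necessarily how fit_sphere_to_headshape
is implemented:

    import numpy as np

    def fit_sphere(points):
        # |p - c|^2 = r^2  <=>  2 c.p + (r^2 - c.c) = p.p, linear in (c, d)
        A = np.c_[2 * points, np.ones(len(points))]
        b = (points ** 2).sum(axis=1)
        x = np.linalg.lstsq(A, b)[0]
        center = x[:3]
        radius = np.sqrt(x[3] + np.dot(center, center))
        return radius, center

    rng = np.random.RandomState(0)
    pts = rng.randn(50, 3)
    pts = 90. * pts / np.linalg.norm(pts, axis=1)[:, np.newaxis]  # 90 mm sphere
    pts += np.array([0.5, -10., 40.])                             # known center
    radius, center = fit_sphere(pts)  # ~90 and ~[0.5, -10, 40]
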
diff --git a/mne/tests/test_channels.py b/mne/tests/test_channels.py
deleted file mode 100644
index 391725d..0000000
--- a/mne/tests/test_channels.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Author: Daniel G Wakeman <dwakeman at nmr.mgh.harvard.edu>
-#         Denis A. Engemann <denis.engemann at gmail.com>
-#
-# License: BSD (3-clause)
-
-import os.path as op
-
-from copy import deepcopy
-
-import numpy as np
-from nose.tools import assert_raises, assert_true, assert_equal
-from scipy.io import savemat
-
-from mne.channels import (rename_channels, read_ch_connectivity,
-                          ch_neighbor_connectivity)
-from mne.io import read_info
-from mne.io.constants import FIFF
-from mne.fixes import partial
-from mne.utils import _TempDir
-
-tempdir = _TempDir()
-
-
-base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
-raw_fname = op.join(base_dir, 'test_raw.fif')
-
-
-def test_rename_channels():
-    """Test rename channels
-    """
-    info = read_info(raw_fname)
-    # Error Tests
-    # Test channel name exists in ch_names
-    mapping = {'EEG 160': 'EEG060'}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test change to EEG channel
-    mapping = {'EOG 061': ('EEG 061', 'eeg')}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test change to illegal channel type
-    mapping = {'EOG 061': ('MEG 061', 'meg')}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test channel type which you are changing from e.g. MEG
-    mapping = {'MEG 2641': ('MEG2641', 'eeg')}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test improper mapping configuration
-    mapping = {'MEG 2641': 1.0}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test duplicate named channels
-    mapping = {'EEG 060': 'EOG 061'}
-    assert_raises(ValueError, rename_channels, info, mapping)
-    # Test successful changes
-    # Test ch_name and ch_names are changed
-    info2 = deepcopy(info)  # for consistency at the start of each test
-    info2['bads'] = ['EEG 060', 'EOG 061']
-    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
-    rename_channels(info2, mapping)
-    assert_true(info2['chs'][374]['ch_name'] == 'EEG060')
-    assert_true(info2['ch_names'][374] == 'EEG060')
-    assert_true('EEG060' in info2['bads'])
-    assert_true(info2['chs'][375]['ch_name'] == 'EOG061')
-    assert_true(info2['ch_names'][375] == 'EOG061')
-    assert_true('EOG061' in info2['bads'])
-    # Test type change
-    info2 = deepcopy(info)
-    info2['bads'] = ['EEG 060', 'EEG 059']
-    mapping = {'EEG 060': ('EOG 060', 'eog'), 'EEG 059': ('EOG 059', 'eog')}
-    rename_channels(info2, mapping)
-    assert_true(info2['chs'][374]['ch_name'] == 'EOG 060')
-    assert_true(info2['ch_names'][374] == 'EOG 060')
-    assert_true('EOG 060' in info2['bads'])
-    assert_true(info2['chs'][374]['kind'] is FIFF.FIFFV_EOG_CH)
-    assert_true(info2['chs'][373]['ch_name'] == 'EOG 059')
-    assert_true(info2['ch_names'][373] == 'EOG 059')
-    assert_true('EOG 059' in info2['bads'])
-    assert_true(info2['chs'][373]['kind'] is FIFF.FIFFV_EOG_CH)
-
-
-def test_read_ch_connectivity():
-    "Test reading channel connectivity templates"
-    a = partial(np.array, dtype='<U7')
-    # no pep8
-    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
-                     (['MEG0121'], [[a(['MEG0111'])],
-                                    [a(['MEG0131'])]]),
-                     (['MEG0131'], [[a(['MEG0111'])],
-                                    [a(['MEG0121'])]])]],
-                   dtype=[('label', 'O'), ('neighblabel', 'O')])
-    mat = dict(neighbours=nbh)
-    mat_fname = op.join(tempdir, 'test_mat.mat')
-    savemat(mat_fname, mat)
-
-    ch_connectivity = read_ch_connectivity(mat_fname)
-    x = ch_connectivity
-    assert_equal(x.shape, (3, 3))
-    assert_equal(x[0, 1], False)
-    assert_equal(x[0, 2], True)
-    assert_true(np.all(x.diagonal()))
-    assert_raises(ValueError, read_ch_connectivity, mat_fname, [0, 3])
-    ch_connectivity = read_ch_connectivity(mat_fname, picks=[0, 2])
-    assert_equal(ch_connectivity.shape[0], 2)
-
-    ch_names = ['EEG01', 'EEG02', 'EEG03']
-    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
-    assert_raises(ValueError, ch_neighbor_connectivity, ch_names, neighbors)
-    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
-    assert_raises(ValueError, ch_neighbor_connectivity, ch_names[:2],
-                  neighbors)
-    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
-    assert_raises(ValueError, ch_neighbor_connectivity, ch_names, neighbors)
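
For context on the deleted assertions: read_ch_connectivity turns a
FieldTrip-style neighbours struct into a boolean adjacency matrix with a True
diagonal, taking the neighbour lists as given (hence x[0, 1] being False even
though MEG0121 lists MEG0111). A numpy-only sketch of that mapping, not the
mne.channels implementation:

    import numpy as np

    ch_names = ['MEG0111', 'MEG0121', 'MEG0131']
    neighbours = {'MEG0111': ['MEG0131'],
                  'MEG0121': ['MEG0111', 'MEG0131'],
                  'MEG0131': ['MEG0111', 'MEG0121']}

    idx = dict((name, i) for i, name in enumerate(ch_names))
    adjacency = np.eye(len(ch_names), dtype=bool)  # sensors neighbor themselves
    for name, neighs in neighbours.items():
        for neigh in neighs:
            adjacency[idx[name], idx[neigh]] = True

    # adjacency[0, 1] is False, adjacency[0, 2] is True, diagonal all True
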
diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py
new file mode 100644
index 0000000..8d837bf
--- /dev/null
+++ b/mne/tests/test_chpi.py
@@ -0,0 +1,168 @@
+# Author: Eric Larson <larson.eric.d at gmail.com>
+#
+# License: BSD (3-clause)
+
+import os.path as op
+import numpy as np
+from numpy.testing import assert_allclose
+from nose.tools import assert_raises, assert_equal, assert_true
+import warnings
+
+from mne.io import read_info, Raw
+from mne.io.constants import FIFF
+from mne.chpi import (_rot_to_quat, _quat_to_rot, get_chpi_positions,
+                      _calculate_chpi_positions, _angle_between_quats)
+from mne.utils import (run_tests_if_main, _TempDir, slow_test, set_log_file,
+                       requires_version)
+from mne.datasets import testing
+
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+test_fif_fname = op.join(base_dir, 'test_raw.fif')
+ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
+hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
+
+data_path = testing.data_path(download=False)
+raw_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
+pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
+sss_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_sss.fif')
+
+warnings.simplefilter('always')
+
+
+def test_quaternions():
+    """Test quaternion calculations
+    """
+    rots = [np.eye(3)]
+    for fname in [test_fif_fname, ctf_fname, hp_fif_fname]:
+        rots += [read_info(fname)['dev_head_t']['trans'][:3, :3]]
+    for rot in rots:
+        assert_allclose(rot, _quat_to_rot(_rot_to_quat(rot)),
+                        rtol=1e-5, atol=1e-5)
+        rot = rot[np.newaxis, np.newaxis, :, :]
+        assert_allclose(rot, _quat_to_rot(_rot_to_quat(rot)),
+                        rtol=1e-5, atol=1e-5)
+
+    # let's make sure our angle function works in some reasonable way
+    for ii in range(3):
+        for jj in range(3):
+            a = np.zeros(3)
+            b = np.zeros(3)
+            a[ii] = 1.
+            b[jj] = 1.
+            expected = np.pi if ii != jj else 0.
+            assert_allclose(_angle_between_quats(a, b), expected, atol=1e-5)
+
+
+def test_get_chpi():
+    """Test CHPI position computation
+    """
+    trans0, rot0 = get_chpi_positions(hp_fname)[:2]
+    trans0, rot0 = trans0[:-1], rot0[:-1]
+    raw = Raw(hp_fif_fname)
+    out = get_chpi_positions(raw)
+    trans1, rot1, t1 = out
+    trans1, rot1 = trans1[2:], rot1[2:]
+    # these will not be exact because they don't use equivalent time points
+    assert_allclose(trans0, trans1, atol=1e-5, rtol=1e-1)
+    assert_allclose(rot0, rot1, atol=1e-6, rtol=1e-1)
+    # run through input checking
+    assert_raises(TypeError, get_chpi_positions, 1)
+    assert_raises(ValueError, get_chpi_positions, hp_fname, [1])
+    raw_no_chpi = Raw(test_fif_fname)
+    assert_raises(RuntimeError, get_chpi_positions, raw_no_chpi)
+    assert_raises(ValueError, get_chpi_positions, raw, t_step='foo')
+    assert_raises(IOError, get_chpi_positions, 'foo')
+
+
+ at testing.requires_testing_data
+def test_hpi_info():
+    """Test getting HPI info
+    """
+    tempdir = _TempDir()
+    temp_name = op.join(tempdir, 'temp_raw.fif')
+    for fname in (raw_fif_fname, sss_fif_fname):
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            raw = Raw(fname, allow_maxshield=True)
+        assert_true(len(raw.info['hpi_subsystem']) > 0)
+        raw.save(temp_name, overwrite=True)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            raw_2 = Raw(temp_name, allow_maxshield=True)
+        assert_equal(len(raw_2.info['hpi_subsystem']),
+                     len(raw.info['hpi_subsystem']))
+
+
+def _compare_positions(a, b, max_dist=0.003, max_angle=5.):
+    """Compare estimated cHPI positions"""
+    from scipy.interpolate import interp1d
+    trans, rot, t = a
+    trans_est, rot_est, t_est = b
+    quats_est = _rot_to_quat(rot_est)
+
+    # MaxFilter yields some implausibly large times; restrict to the overlap
+    use_mask = (t >= t_est[0]) & (t <= t_est[-1])
+    t = t[use_mask]
+    trans = trans[use_mask]
+    quats = _rot_to_quat(rot)
+    quats = quats[use_mask]
+
+    # double-check our angle function
+    for q in (quats, quats_est):
+        angles = _angle_between_quats(q, q)
+        assert_allclose(angles, 0., atol=1e-5)
+
+    # < 3 mm translation difference between MF and our estimation
+    trans_est_interp = interp1d(t_est, trans_est, axis=0)(t)
+    worst = np.sqrt(np.sum((trans - trans_est_interp) ** 2, axis=1)).max()
+    assert_true(worst <= max_dist, '%0.1f > %0.1f mm'
+                % (1000 * worst, 1000 * max_dist))
+
+    # < 5 degrees rotation difference between MF and our estimation
+    # (note that the interpolation will make this slightly worse)
+    quats_est_interp = interp1d(t_est, quats_est, axis=0)(t)
+    worst = 180 * _angle_between_quats(quats_est_interp, quats).max() / np.pi
+    assert_true(worst <= max_angle, '%0.1f > %0.1f deg' % (worst, max_angle))
+
+
+ at slow_test
+ at testing.requires_testing_data
+ at requires_version('scipy', '0.11')
+ at requires_version('numpy', '1.7')
+def test_calculate_chpi_positions():
+    """Test calculation of cHPI positions
+    """
+    trans, rot, t = get_chpi_positions(pos_fname)
+    with warnings.catch_warnings(record=True):
+        raw = Raw(raw_fif_fname, allow_maxshield=True, preload=True)
+    t -= raw.first_samp / raw.info['sfreq']
+    trans_est, rot_est, t_est = _calculate_chpi_positions(raw, verbose='debug')
+    _compare_positions((trans, rot, t), (trans_est, rot_est, t_est))
+
+    # degenerate conditions
+    raw_no_chpi = Raw(test_fif_fname)
+    assert_raises(RuntimeError, _calculate_chpi_positions, raw_no_chpi)
+    raw_bad = raw.copy()
+    for d in raw_bad.info['dig']:
+        if d['kind'] == FIFF.FIFFV_POINT_HPI:
+            d['coord_frame'] = 999
+            break
+    assert_raises(RuntimeError, _calculate_chpi_positions, raw_bad)
+    raw_bad = raw.copy()
+    for d in raw_bad.info['dig']:
+        if d['kind'] == FIFF.FIFFV_POINT_HPI:
+            d['r'] = np.ones(3)
+    raw_bad.crop(0, 1., copy=False)
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    set_log_file(log_file, overwrite=True)
+    try:
+        _calculate_chpi_positions(raw_bad)
+    finally:
+        set_log_file()
+    with open(log_file, 'r') as fid:
+        for line in fid:
+            assert_true('0/5 acceptable' in line)
+
+run_tests_if_main()
diff --git a/mne/tests/test_coreg.py b/mne/tests/test_coreg.py
index e6b2ea2..0735f8e 100644
--- a/mne/tests/test_coreg.py
+++ b/mne/tests/test_coreg.py
@@ -1,36 +1,28 @@
+from glob import glob
 import os
 
-from nose.tools import assert_raises, assert_true, assert_equal
+from nose.tools import assert_raises, assert_true
 import numpy as np
-from numpy.testing import (assert_array_equal, assert_array_almost_equal,
-                           assert_array_less)
+from numpy.testing import assert_array_almost_equal, assert_array_less
 
+import mne
 from mne.transforms import apply_trans, rotation, translation, scaling
 from mne.coreg import (fit_matched_points, fit_point_cloud,
                        _point_cloud_error, _decimate_points,
                        create_default_subject, scale_mri,
-                       _is_mri_subject, scale_labels, scale_source_space,
-                       read_elp)
-from mne.io.kit.tests import data_dir as kit_data_dir
-from mne.utils import requires_mne_fs_in_env, _TempDir, run_subprocess
+                       _is_mri_subject, scale_labels, scale_source_space)
+from mne.utils import (requires_mne, requires_freesurfer, _TempDir,
+                       run_tests_if_main, requires_version)
 from functools import reduce
 
 
-tempdir = _TempDir()
-
-
-def test_read_elp():
-    """Test reading an ELP file"""
-    path = os.path.join(kit_data_dir, 'test_elp.txt')
-    points = read_elp(path)
-    assert_equal(points.shape, (8, 3))
-    assert_array_equal(points[0], [1.3930, 13.1613, -4.6967])
-
-
- at requires_mne_fs_in_env
+ at requires_mne
+ at requires_freesurfer
+ at requires_version('scipy', '0.11')
 def test_scale_mri():
     """Test creating fsaverage and scaling it"""
     # create fsaverage
+    tempdir = _TempDir()
     create_default_subject(subjects_dir=tempdir)
     is_mri = _is_mri_subject('fsaverage', tempdir)
     assert_true(is_mri, "Creating fsaverage failed")
@@ -41,29 +33,42 @@ def test_scale_mri():
     create_default_subject(update=True, subjects_dir=tempdir)
     assert_true(os.path.exists(fid_path), "Updating fsaverage")
 
+    # remove redundant label files
+    label_temp = os.path.join(tempdir, 'fsaverage', 'label', '*.label')
+    label_paths = glob(label_temp)
+    for label_path in label_paths[1:]:
+        os.remove(label_path)
+
     # create source space
-    path = os.path.join(tempdir, 'fsaverage', 'bem', 'fsaverage-ico-6-src.fif')
-    if not os.path.exists(path):
-        cmd = ['mne_setup_source_space', '--subject', 'fsaverage', '--ico',
-               '6']
-        env = os.environ.copy()
-        env['SUBJECTS_DIR'] = tempdir
-        run_subprocess(cmd, env=env)
+    path = os.path.join(tempdir, 'fsaverage', 'bem', 'fsaverage-ico-0-src.fif')
+    mne.setup_source_space('fsaverage', path, 'ico0', overwrite=True,
+                           subjects_dir=tempdir, add_dist=False)
 
     # scale fsaverage
-    scale_mri('fsaverage', 'flachkopf', [1, .2, .8], True, subjects_dir=tempdir)
+    os.environ['_MNE_FEW_SURFACES'] = 'true'
+    scale_mri('fsaverage', 'flachkopf', [1, .2, .8], True,
+              subjects_dir=tempdir)
+    del os.environ['_MNE_FEW_SURFACES']
     is_mri = _is_mri_subject('flachkopf', tempdir)
     assert_true(is_mri, "Scaling fsaverage failed")
     src_path = os.path.join(tempdir, 'flachkopf', 'bem',
-                            'flachkopf-ico-6-src.fif')
+                            'flachkopf-ico-0-src.fif')
     assert_true(os.path.exists(src_path), "Source space was not scaled")
     scale_labels('flachkopf', subjects_dir=tempdir)
 
     # scale source space separately
     os.remove(src_path)
-    scale_source_space('flachkopf', 'ico-6', subjects_dir=tempdir)
+    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
     assert_true(os.path.exists(src_path), "Source space was not scaled")
 
+    # add distances to source space
+    src = mne.read_source_spaces(path)
+    mne.add_source_space_distances(src)
+    src.save(path)
+
+    # scale with distances
+    os.remove(src_path)
+    scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
 
 
 def test_fit_matched_points():
@@ -83,7 +88,7 @@ def test_fit_matched_points():
     trans = np.dot(rotation(2, 6, 3), scaling(.5, .5, .5))
     src_pts = apply_trans(trans, tgt_pts)
     trans_est = fit_matched_points(src_pts, tgt_pts, translate=False, scale=1,
-                                out='trans')
+                                   out='trans')
     est_pts = apply_trans(trans_est, src_pts)
     assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
                               "rotation and scaling.")
@@ -164,3 +169,6 @@ def test_fit_point_cloud():
     err = _point_cloud_error(est_pts, tgt_pts)
     assert_array_less(err, .1, "fit_point_cloud with rotation and 3 scaling "
                       "parameters.")
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py
index 37cc9c0..6619b04 100644
--- a/mne/tests/test_cov.py
+++ b/mne/tests/test_cov.py
@@ -1,23 +1,32 @@
 # Author: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#         Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
 import os.path as op
 
-from nose.tools import assert_true
-from numpy.testing import assert_array_almost_equal
+from nose.tools import assert_true, assert_equal
+from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_raises
 import numpy as np
 from scipy import linalg
 import warnings
+import itertools as itt
+
+from mne.cov import (regularize, whiten_evoked, _estimate_rank_meeg_cov,
+                     _auto_low_rank_model, _apply_scaling_cov,
+                     _undo_scaling_cov)
 
-from mne.cov import regularize, whiten_evoked
 from mne import (read_cov, write_cov, Epochs, merge_events,
-                 find_events, compute_raw_data_covariance,
-                 compute_covariance, read_evokeds)
-from mne import pick_channels_cov, pick_channels, pick_types
+                 find_events, compute_raw_covariance,
+                 compute_covariance, read_evokeds, compute_proj_raw,
+                 pick_channels_cov, pick_channels, pick_types, pick_info,
+                 make_ad_hoc_cov)
 from mne.io import Raw
-from mne.utils import _TempDir
+from mne.utils import (_TempDir, slow_test, requires_sklearn_0_15,
+                       run_tests_if_main)
+from mne.io.proc_history import _get_sss_rank
+from mne.io.pick import channel_type, _picks_by_type
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -28,17 +37,34 @@ cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 ave_fname = op.join(base_dir, 'test-ave.fif')
 erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
+hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
+
 
-tempdir = _TempDir()
+def test_ad_hoc_cov():
+    """Test ad hoc cov creation and I/O"""
+    tempdir = _TempDir()
+    out_fname = op.join(tempdir, 'test-cov.fif')
+    evoked = read_evokeds(ave_fname)[0]
+    cov = make_ad_hoc_cov(evoked.info)
+    cov.save(out_fname)
+    assert_true('Covariance' in repr(cov))
+    cov2 = read_cov(out_fname)
+    assert_array_almost_equal(cov['data'], cov2['data'])
 
 
 def test_io_cov():
     """Test IO for noise covariance matrices
     """
+    tempdir = _TempDir()
     cov = read_cov(cov_fname)
+    cov['method'] = 'empirical'
+    cov['loglik'] = -np.inf
     cov.save(op.join(tempdir, 'test-cov.fif'))
     cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
     assert_array_almost_equal(cov.data, cov2.data)
+    assert_equal(cov['method'], cov2['method'])
+    assert_equal(cov['loglik'], cov2['loglik'])
+    assert_true('Covariance' in repr(cov))
 
     cov2 = read_cov(cov_gz_fname)
     assert_array_almost_equal(cov.data, cov2.data)
@@ -70,12 +96,13 @@ def test_io_cov():
 def test_cov_estimation_on_raw_segment():
     """Test estimation from raw on continuous recordings (typically empty room)
     """
+    tempdir = _TempDir()
     raw = Raw(raw_fname, preload=False)
-    cov = compute_raw_data_covariance(raw)
+    cov = compute_raw_covariance(raw)
     cov_mne = read_cov(erm_cov_fname)
     assert_true(cov_mne.ch_names == cov.ch_names)
-    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
-                / linalg.norm(cov.data, ord='fro') < 1e-4)
+    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                linalg.norm(cov.data, ord='fro') < 1e-4)
 
     # test IO when computation done in Python
     cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
@@ -86,7 +113,7 @@ def test_cov_estimation_on_raw_segment():
 
     # test with a subset of channels
     picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
-    cov = compute_raw_data_covariance(raw, picks=picks)
+    cov = compute_raw_covariance(raw, picks=picks)
     assert_true(cov_mne.ch_names[:5] == cov.ch_names)
     assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                 ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
@@ -94,13 +121,15 @@ def test_cov_estimation_on_raw_segment():
     raw_2 = raw.crop(0, 1)
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
-        cov = compute_raw_data_covariance(raw_2)
+        cov = compute_raw_covariance(raw_2)
     assert_true(len(w) == 1)
 
 
+ at slow_test
 def test_cov_estimation_with_triggers():
     """Test estimation from raw with triggers
     """
+    tempdir = _TempDir()
     raw = Raw(raw_fname, preload=False)
     events = find_events(raw, stim_channel='STI 014')
     event_ids = [1, 2, 3, 4]
@@ -115,14 +144,14 @@ def test_cov_estimation_with_triggers():
     cov = compute_covariance(epochs, keep_sample_mean=True)
     cov_mne = read_cov(cov_km_fname)
     assert_true(cov_mne.ch_names == cov.ch_names)
-    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
-                 / linalg.norm(cov.data, ord='fro')) < 0.005)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                linalg.norm(cov.data, ord='fro')) < 0.005)
 
     # Test with tmin and tmax (different but not too much)
     cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
     assert_true(np.all(cov.data != cov_tmin_tmax.data))
-    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro')
-                 / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
+    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
+                 linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
 
     # cov using a list of epochs and keep_sample_mean=True
     epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
@@ -137,16 +166,23 @@ def test_cov_estimation_with_triggers():
     cov = compute_covariance(epochs, keep_sample_mean=False)
     cov_mne = read_cov(cov_fname)
     assert_true(cov_mne.ch_names == cov.ch_names)
-    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
-                 / linalg.norm(cov.data, ord='fro')) < 0.005)
+    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro') /
+                 linalg.norm(cov.data, ord='fro')) < 0.005)
+
+    method_params = {'empirical': {'assume_centered': False}}
+    assert_raises(ValueError, compute_covariance, epochs,
+                  keep_sample_mean=False, method_params=method_params)
+
+    assert_raises(ValueError, compute_covariance, epochs,
+                  keep_sample_mean=False, method='factor_analysis')
 
     # test IO when computation done in Python
     cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
     cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
     assert_true(cov_read.ch_names == cov.ch_names)
     assert_true(cov_read.nfree == cov.nfree)
-    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro')
-                 / linalg.norm(cov.data, ord='fro')) < 1e-5)
+    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro') /
+                 linalg.norm(cov.data, ord='fro')) < 1e-5)
 
     # cov with list of epochs with different projectors
     epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
@@ -218,3 +254,211 @@ def test_evoked_whiten():
     mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
     assert_true(np.all(mean_baseline < 1.))
     assert_true(np.all(mean_baseline > 0.2))
+
+
+ at slow_test
+def test_rank():
+    """Test cov rank estimation"""
+    raw_sample = Raw(raw_fname)
+
+    raw_sss = Raw(hp_fif_fname)
+    raw_sss.add_proj(compute_proj_raw(raw_sss))
+
+    cov_sample = compute_raw_covariance(raw_sample)
+    cov_sample_proj = compute_raw_covariance(
+        raw_sample.copy().apply_proj())
+
+    cov_sss = compute_raw_covariance(raw_sss)
+    cov_sss_proj = compute_raw_covariance(
+        raw_sss.copy().apply_proj())
+
+    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
+    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)
+
+    info_sample = pick_info(raw_sample.info, picks_all_sample)
+    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
+                                             eeg=True))]
+    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
+    picks_stack_sample += [('all',
+                            pick_types(info_sample, meg=True, eeg=True))]
+
+    info_sss = pick_info(raw_sss.info, picks_all_sss)
+    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
+    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
+    picks_stack_somato += [('all',
+                            pick_types(info_sss, meg=True, eeg=True))]
+
+    iter_tests = list(itt.product(
+        [(cov_sample, picks_stack_sample, info_sample),
+         (cov_sample_proj, picks_stack_sample, info_sample),
+         (cov_sss, picks_stack_somato, info_sss),
+         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
+        [dict(mag=1e15, grad=1e13, eeg=1e6)]
+    ))
+
+    for (cov, picks_list, this_info), scalings in iter_tests:
+        for ch_type, picks in picks_list:
+
+            this_very_info = pick_info(this_info, picks)
+
+            # compute subset of projs
+            this_projs = [c['active'] and
+                          len(set(c['data']['col_names'])
+                              .intersection(set(this_very_info['ch_names']))) >
+                          0 for c in cov['projs']]
+            n_projs = sum(this_projs)
+
+            # count channel types
+            ch_types = [channel_type(this_very_info, idx)
+                        for idx in range(len(picks))]
+            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
+                                    ['eeg', 'mag', 'grad']]
+            n_meg = n_mag + n_grad
+            if ch_type in ('all', 'eeg'):
+                n_projs_eeg = 1
+            else:
+                n_projs_eeg = 0
+
+            # check sss
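+            # expected rank: after Maxwell filtering (SSS), the MEG rank is
+            # bounded by the number of retained SSS components rather than
+            # by the channel count, while EEG contributes one per channel
+            # minus any active projectors on the picked channels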
+            if 'proc_history' in this_very_info:
+                mf = this_very_info['proc_history'][0]['max_info']
+                n_free = _get_sss_rank(mf)
+                if 'mag' not in ch_types and 'grad' not in ch_types:
+                    n_free = 0
+                # - n_projs XXX clarify
+                expected_rank = n_free + n_eeg
+                if n_projs > 0 and ch_type in ('all', 'eeg'):
+                    expected_rank -= n_projs_eeg
+            else:
+                expected_rank = n_meg + n_eeg - n_projs
+
+            C = cov['data'][np.ix_(picks, picks)]
+            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
+                                               scalings=scalings)
+
+            assert_equal(expected_rank, est_rank)
+
+
+def test_cov_scaling():
+    """Test rescaling covs"""
+    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
+                          proj=True)
+    cov = read_cov(cov_fname)['data']
+    cov2 = read_cov(cov_fname)['data']
+
+    assert_array_equal(cov, cov2)
+    evoked.pick_channels([evoked.ch_names[k] for k in pick_types(
+        evoked.info, meg=True, eeg=True
+    )])
+    picks_list = _picks_by_type(evoked.info)
+    scalings = dict(mag=1e15, grad=1e13, eeg=1e6)
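+    # the scalings roughly convert SI units to fT, fT/cm, and uV so that
+    # all channel types live on comparable numeric scales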
+
+    _apply_scaling_cov(cov2, picks_list, scalings=scalings)
+    _apply_scaling_cov(cov, picks_list, scalings=scalings)
+    assert_array_equal(cov, cov2)
+    assert_true(cov.max() > 1)
+
+    _undo_scaling_cov(cov2, picks_list, scalings=scalings)
+    _undo_scaling_cov(cov, picks_list, scalings=scalings)
+    assert_array_equal(cov, cov2)
+    assert_true(cov.max() < 1)
+
+
+ at requires_sklearn_0_15
+def test_auto_low_rank():
+    """Test probabilistic low rank estimators"""
+
+    n_samples, n_features, rank = 400, 20, 10
+    sigma = 0.1
+
+    def get_data(n_samples, n_features, rank, sigma):
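+        # build data of known low rank: rank-dimensional latent factors
+        # are projected into n_features dimensions via an orthonormal
+        # basis (from the SVD of a random matrix), then heteroscedastic
+        # noise is added so the estimator should recover `rank` as the
+        # best model order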
+        rng = np.random.RandomState(42)
+        W = rng.randn(n_features, n_features)
+        X = rng.randn(n_samples, rank)
+        U, _, _ = linalg.svd(W.copy())
+        X = np.dot(X, U[:, :rank].T)
+
+        sigmas = sigma * rng.rand(n_features) + sigma / 2.
+        X += rng.randn(n_samples, n_features) * sigmas
+        return X
+
+    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
+                 sigma=sigma)
+    method_params = {'iter_n_components': [9, 10, 11]}
+    cv = 3
+    n_jobs = 1
+    mode = 'factor_analysis'
+    rescale = 1e8
+    X *= rescale
+    est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
+                                     method_params=method_params,
+                                     cv=cv)
+    assert_equal(info['best'], rank)
+
+    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
+                 sigma=sigma)
+    method_params = {'iter_n_components': [n_features + 5]}
+    msg = ('You are trying to estimate %i components on matrix '
+           'with %i features.')
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
+                             method_params=method_params, cv=cv)
+        assert_equal(len(w), 1)
+        assert_equal(msg % (n_features + 5, n_features), '%s' % w[0].message)
+
+    method_params = {'iter_n_components': [n_features + 5]}
+    assert_raises(ValueError, _auto_low_rank_model, X, mode='foo',
+                  n_jobs=n_jobs, method_params=method_params, cv=cv)
+
+
+ at slow_test
+ at requires_sklearn_0_15
+def test_compute_covariance_auto_reg():
+    """Test automated regularization"""
+
+    raw = Raw(raw_fname, preload=False)
+    events = find_events(raw, stim_channel='STI 014')
+    event_ids = [1, 2, 3, 4]
+    reject = dict(mag=4e-12)
+
+    # cov with merged events and keep_sample_mean=True
+    events_merged = merge_events(events, event_ids, 1234)
+    picks = pick_types(raw.info, meg='mag', eeg=False)
+    epochs = Epochs(
+        raw, events_merged, 1234, tmin=-0.2, tmax=0,
+        picks=picks[:10],  # we need a few channels for numerical reasons
+        # in PCA/FA.
+        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
+    epochs = epochs.crop(None, 0)[:10]
+
+    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
+                         pca=dict(iter_n_components=[3]))
+
+    covs = compute_covariance(epochs, method='auto',
+                              method_params=method_params,
+                              projs=True,
+                              return_estimators=True)
+
+    logliks = [c['loglik'] for c in covs]
+    assert_true(np.diff(logliks).max() <= 0)  # descending order
+
+    methods = ['empirical',
+               'factor_analysis',
+               'ledoit_wolf',
+               'pca']
+    cov3 = compute_covariance(epochs, method=methods,
+                              method_params=method_params, projs=None,
+                              return_estimators=True)
+
+    assert_equal(set([c['method'] for c in cov3]),
+                 set(methods))
+
+    # invalid prespecified method
+    assert_raises(ValueError, compute_covariance, epochs, method='pizza')
+
+    # invalid scalings
+    assert_raises(ValueError, compute_covariance, epochs, method='shrunk',
+                  scalings=dict(misc=123))
+
+run_tests_if_main()
diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py
new file mode 100644
index 0000000..807a693
--- /dev/null
+++ b/mne/tests/test_defaults.py
@@ -0,0 +1,22 @@
+from nose.tools import assert_equal, assert_true
+from copy import deepcopy
+
+from mne.defaults import _handle_default
+
+
+def test_handle_default():
+    """Test mutable default
+    """
+    x = deepcopy(_handle_default('scalings'))
+    y = _handle_default('scalings')
+    z = _handle_default('scalings', dict(mag=1, grad=2))
+    w = _handle_default('scalings', {})
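+    # x, y, and w should all equal the defaults; z overrides 'mag' and
+    # 'grad' while keeping every other default untouched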
+    assert_equal(set(x.keys()), set(y.keys()))
+    assert_equal(set(x.keys()), set(z.keys()))
+    for key in x.keys():
+        assert_equal(x[key], y[key])
+        assert_equal(x[key], w[key])
+        if key in ('mag', 'grad'):
+            assert_true(x[key] != z[key])
+        else:
+            assert_equal(x[key], z[key])
diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py
index d5314f4..4819578 100644
--- a/mne/tests/test_dipole.py
+++ b/mne/tests/test_dipole.py
@@ -1,22 +1,256 @@
 import os.path as op
-from nose.tools import assert_true
+import numpy as np
+from nose.tools import assert_true, assert_equal, assert_raises
+from numpy.testing import assert_allclose
+import warnings
 
-from mne import read_dip
-from mne.datasets import sample
+from mne import (read_dipole, read_forward_solution,
+                 convert_forward_solution, read_evokeds, read_cov,
+                 SourceEstimate, write_evokeds, fit_dipole,
+                 transform_surface_to, make_sphere_model, pick_types,
+                 pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
+                 make_forward_solution)
+from mne.simulation import simulate_evoked
+from mne.datasets import testing
+from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
+                       run_subprocess)
+from mne.proj import make_eeg_average_ref_proj
 
-data_path = sample.data_path(download=False)
-dip_fname = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis_set1.dip')
+from mne.io import Raw
 
+from mne.surface import _compute_nearest
+from mne.bem import _bem_find_surface, read_bem_solution
+from mne.transforms import (read_trans, apply_trans, _get_mri_head_t)
 
- at sample.requires_sample_data
-def test_io_dip():
+warnings.simplefilter('always')
+data_path = testing.data_path(download=False)
+fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
+fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
+fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
+fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
+fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-1280-1280-1280-bem-sol.fif')
+fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
+                    'sample-oct-2-src.fif')
+fname_trans = op.join(data_path, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+fname_fwd = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+subjects_dir = op.join(data_path, 'subjects')
+
+
+def _compare_dipoles(orig, new):
+    """Compare dipole results for equivalence"""
+    assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
+    assert_allclose(orig.pos, new.pos, err_msg='pos')
+    assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
+    assert_allclose(orig.gof, new.gof, err_msg='gof')
+    assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
+    assert_equal(orig.name, new.name)
+
+
+def _check_dipole(dip, n_dipoles):
+    assert_equal(len(dip), n_dipoles)
+    assert_equal(dip.pos.shape, (n_dipoles, 3))
+    assert_equal(dip.ori.shape, (n_dipoles, 3))
+    assert_equal(dip.gof.shape, (n_dipoles,))
+    assert_equal(dip.amplitude.shape, (n_dipoles,))
+
+
+ at testing.requires_testing_data
+def test_io_dipoles():
     """Test IO for .dip files
     """
-    time, pos, amplitude, ori, gof = read_dip(dip_fname)
+    tempdir = _TempDir()
+    dipole = read_dipole(fname_dip)
+    print(dipole)  # test repr
+    out_fname = op.join(tempdir, 'temp.dip')
+    dipole.save(out_fname)
+    dipole_new = read_dipole(out_fname)
+    _compare_dipoles(dipole, dipole_new)
+
+
+ at slow_test
+ at testing.requires_testing_data
+ at requires_mne
+def test_dipole_fitting():
+    """Test dipole fitting"""
+    amp = 10e-9
+    tempdir = _TempDir()
+    rng = np.random.RandomState(0)
+    fname_dtemp = op.join(tempdir, 'test.dip')
+    fname_sim = op.join(tempdir, 'test-ave.fif')
+    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
+                                   surf_ori=False, force_fixed=True)
+    evoked = read_evokeds(fname_evo)[0]
+    cov = read_cov(fname_cov)
+    n_per_hemi = 5
+    vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
+                for s in fwd['src']]
+    nv = sum(len(v) for v in vertices)
+    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
+    with warnings.catch_warnings(record=True):  # semi-def cov
+        evoked = simulate_evoked(fwd, stc, evoked, cov, snr=20,
+                                 random_state=rng)
+    # For speed, let's use a subset of channels (strange but works)
+    picks = np.sort(np.concatenate([
+        pick_types(evoked.info, meg=True, eeg=False)[::2],
+        pick_types(evoked.info, meg=False, eeg=True)[::2]]))
+    evoked.pick_channels([evoked.ch_names[p] for p in picks])
+    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
+    write_evokeds(fname_sim, evoked)
+
+    # Run MNE-C version
+    run_subprocess([
+        'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
+        '--noise', fname_cov, '--dip', fname_dtemp,
+        '--mri', fname_fwd, '--reg', '0', '--tmin', '0',
+    ])
+    dip_c = read_dipole(fname_dtemp)
+
+    # Run mne-python version
+    sphere = make_sphere_model(head_radius=0.1)
+    dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
+
+    # Sanity check: do our residuals have less power than orig data?
+    data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
+    resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
+    assert_true((data_rms > resi_rms).all())
+
+    # Compare to original points
+    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
+    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
+    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
+                            axis=0)
+    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
+                            axis=0)
+
+    # MNE-C skips the last "time" point :(
+    dip.crop(dip_c.times[0], dip_c.times[-1])
+    src_rr, src_nn = src_rr[:-1], src_nn[:-1]
+
+    # check that we did at least as well
+    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
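+    # dists: RMS localization error, corrs: correlation with the true
+    # positions, gc_dists: angular (great-circle) orientation error in
+    # degrees, amp_errs: RMS amplitude error, gofs: mean goodness of fit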
+    for d in (dip_c, dip):
+        new = d.pos
+        diffs = new - src_rr
+        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
+        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
+        gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
+                                                     axis=1)))]
+        amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
+        gofs += [np.mean(d.gof)]
+    assert_true(dists[0] >= dists[1], 'dists: %s' % dists)
+    assert_true(corrs[0] <= corrs[1], 'corrs: %s' % corrs)
+    assert_true(gc_dists[0] >= gc_dists[1], 'gc-dists (ori): %s' % gc_dists)
+    assert_true(amp_errs[0] >= amp_errs[1], 'amplitude errors: %s' % amp_errs)
+    assert_true(gofs[0] <= gofs[1], 'gof: %s' % gofs)
+
+
+ at testing.requires_testing_data
+def test_len_index_dipoles():
+    """Test len and indexing of Dipole objects
+    """
+    dipole = read_dipole(fname_dip)
+    d0 = dipole[0]
+    d1 = dipole[:1]
+    _check_dipole(d0, 1)
+    _check_dipole(d1, 1)
+    _compare_dipoles(d0, d1)
+    mask = dipole.gof > 15
+    idx = np.where(mask)[0]
+    d_mask = dipole[mask]
+    _check_dipole(d_mask, 4)
+    _compare_dipoles(d_mask, dipole[idx])
+
+
+ at testing.requires_testing_data
+def test_min_distance_fit_dipole():
+    """Test dipole min_dist to inner_skull"""
+    subject = 'sample'
+    raw = Raw(fname_raw, preload=True)
+
+    # select eeg data
+    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
+    info = pick_info(raw.info, picks)
+
+    # Let's use cov = Identity
+    cov = read_cov(fname_cov)
+    cov['data'] = np.eye(cov['data'].shape[0])
+
+    # Simulated scalp map
+    simulated_scalp_map = np.zeros(picks.shape[0])
+    simulated_scalp_map[27:34] = 1
+
+    simulated_scalp_map = simulated_scalp_map[:, None]
+
+    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
+
+    min_dist = 5.  # distance in mm
+
+    dip, residual = fit_dipole(evoked, cov, fname_bem, fname_trans,
+                               min_dist=min_dist)
+
+    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
+
+    # Constraints are not exact, so bump the minimum slightly
+    assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
+
+    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
+                  -1.)
+
+
+def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
+    """Compute dipole depth"""
+    trans = read_trans(fname_trans)
+    trans = _get_mri_head_t(trans)[0]
+    bem = read_bem_solution(fname_bem)
+    surf = _bem_find_surface(bem, 'inner_skull')
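+    # the BEM surface points are in MRI coordinates; transform them to
+    # head coordinates so distances to dip.pos (head coords) make sense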
+    points = surf['rr']
+    points = apply_trans(trans['trans'], points)
+    depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
+    return np.ravel(depth)
+
+
+ at testing.requires_testing_data
+def test_accuracy():
+    """Test dipole fitting to sub-mm accuracy
+    """
+    evoked = read_evokeds(fname_evo)[0].crop(0., 0.)
+    evoked.pick_types(meg=True, eeg=False)
+    evoked.pick_channels([c for c in evoked.ch_names[::4]])
+    bem = make_sphere_model('auto', 0.09, evoked.info,
+                            relative_radii=(0.999, 0.998, 0.997, 0.995))
+    src = read_source_spaces(fname_src)
+
+    fwd = make_forward_solution(evoked.info, None, src, bem)
+    fwd = convert_forward_solution(fwd, force_fixed=True)
+    vertices = [src[0]['vertno'], src[1]['vertno']]
+    n_vertices = sum(len(v) for v in vertices)
+    amp = 10e-9
+    data = np.eye(n_vertices + 1)[:n_vertices]
+    data[-1, -1] = 1.
+    data *= amp
+    stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
+    sim = simulate_evoked(fwd, stc, evoked.info, cov=None, snr=np.inf)
+
+    cov = make_ad_hoc_cov(evoked.info)
+    dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
+
+    ds = []
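+    # map each flat vertex index back to (hemisphere, vertex number) so
+    # the true source location can be compared with the fitted dipole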
+    for vi in range(n_vertices):
+        if vi < len(vertices[0]):
+            hi = 0
+            vertno = vi
+        else:
+            hi = 1
+            vertno = vi - len(vertices[0])
+        vertno = src[hi]['vertno'][vertno]
+        rr = src[hi]['rr'][vertno]
+        d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
+        ds.append(d)
+    # the median error should be sub-mm and the large majority very close
+    # (we expect some to be off a bit, e.g. because they are radial)
+    assert_true((np.percentile(ds, [50, 90]) < [0.0005, 0.002]).all())
 
-    assert_true(pos.shape[1] == 3)
-    assert_true(ori.shape[1] == 3)
-    assert_true(len(time) == len(pos))
-    assert_true(len(time) == gof.size)
-    assert_true(len(time) == amplitude.size)
+run_tests_if_main(False)
diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py
new file mode 100644
index 0000000..0d4654f
--- /dev/null
+++ b/mne/tests/test_docstring_parameters.py
@@ -0,0 +1,160 @@
+# TODO inspect for Cython (see sagenb.misc.sageinspect)
+from __future__ import print_function
+
+from nose.plugins.skip import SkipTest
+from nose.tools import assert_true
+from os import path as op
+import sys
+import inspect
+import warnings
+import imp
+
+from pkgutil import walk_packages
+from inspect import getsource
+
+import mne
+from mne.utils import run_tests_if_main
+
+public_modules = [
+    # the list of modules users need to access for all functionality
+    'mne',
+    'mne.beamformer',
+    'mne.connectivity',
+    'mne.datasets',
+    'mne.datasets.megsim',
+    'mne.datasets.sample',
+    'mne.datasets.spm_face',
+    'mne.decoding',
+    'mne.filter',
+    'mne.gui',
+    'mne.inverse_sparse',
+    'mne.io',
+    'mne.io.kit',
+    'mne.minimum_norm',
+    'mne.preprocessing',
+    'mne.realtime',
+    'mne.report',
+    'mne.simulation',
+    'mne.source_estimate',
+    'mne.source_space',
+    'mne.stats',
+    'mne.time_frequency',
+    'mne.viz',
+]
+
+docscrape_path = op.join(op.dirname(__file__), '..', '..', 'doc', 'sphinxext',
+                         'numpy_ext', 'docscrape.py')
+if op.isfile(docscrape_path):
+    docscrape = imp.load_source('docscrape', docscrape_path)
+else:
+    docscrape = None
+
+
+def get_name(func):
+    parts = []
+    module = inspect.getmodule(func)
+    if module:
+        parts.append(module.__name__)
+    if hasattr(func, 'im_class'):
+        parts.append(func.im_class.__name__)
+    parts.append(func.__name__)
+    return '.'.join(parts)
+
+
+# functions to ignore args / docstring of
+_docstring_ignores = [
+    'mne.io.write',  # always ignore these
+    'mne.fixes._in1d',  # fix function
+    'mne.gui.coregistration',  # deprecated single argument w/None
+]
+
+_tab_ignores = [
+    'mne.channels.tests.test_montage',  # demo data has a tab
+]
+
+
+def check_parameters_match(func, doc=None):
+    """Helper to check docstring, returns list of incorrect results"""
+    incorrect = []
+    name_ = get_name(func)
+    if not name_.startswith('mne.') or name_.startswith('mne.externals'):
+        return incorrect
+    if inspect.isdatadescriptor(func):
+        return incorrect
+    args, varargs, varkw, defaults = inspect.getargspec(func)
+    # drop self
+    if len(args) > 0 and args[0] == 'self':
+        args = args[1:]
+
+    if doc is None:
+        with warnings.catch_warnings(record=True) as w:
+            doc = docscrape.FunctionDoc(func)
+        if len(w):
+            raise RuntimeError('Error for %s:\n%s' % (name_, w[0]))
+    # check set
+    param_names = [name for name, _, _ in doc['Parameters']]
+    # clean up some docscrape output:
+    param_names = [name.split(':')[0].strip('` ') for name in param_names]
+    param_names = [name for name in param_names if '*' not in name]
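+    # compare the cleaned docstring parameter names against the function
+    # signature: both the set of names and their order must match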
+    if len(param_names) != len(args):
+        bad = str(sorted(list(set(param_names) - set(args)) +
+                         list(set(args) - set(param_names))))
+        if not any(d in name_ for d in _docstring_ignores) and \
+                'deprecation_wrapped' not in func.__code__.co_name:
+            incorrect += [name_ + ' arg mismatch: ' + bad]
+    else:
+        for n1, n2 in zip(param_names, args):
+            if n1 != n2:
+                incorrect += [name_ + ' ' + n1 + ' != ' + n2]
+    return incorrect
+
+
+def test_docstring_parameters():
+    """Test module docsting formatting"""
+    if docscrape is None:
+        raise SkipTest('This must be run from the mne-python source directory')
+    incorrect = []
+    for name in public_modules:
+        module = __import__(name, globals())
+        for submod in name.split('.')[1:]:
+            module = getattr(module, submod)
+        classes = inspect.getmembers(module, inspect.isclass)
+        for cname, cls in classes:
+            if cname.startswith('_'):
+                continue
+            with warnings.catch_warnings(record=True) as w:
+                cdoc = docscrape.ClassDoc(cls)
+            if len(w):
+                raise RuntimeError('Error for __init__ of %s in %s:\n%s'
+                                   % (cls, name, w[0]))
+            if hasattr(cls, '__init__'):
+                incorrect += check_parameters_match(cls.__init__, cdoc)
+            for method_name in cdoc.methods:
+                method = getattr(cls, method_name)
+                incorrect += check_parameters_match(method)
+            if hasattr(cls, '__call__'):
+                incorrect += check_parameters_match(cls.__call__)
+        functions = inspect.getmembers(module, inspect.isfunction)
+        for fname, func in functions:
+            if fname.startswith('_'):
+                continue
+            incorrect += check_parameters_match(func)
+    msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
+    if len(incorrect) > 0:
+        raise AssertionError(msg)
+
+
+def test_tabs():
+    """Test that there are no tabs in our source files"""
+    for importer, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
+        if not ispkg and modname not in _tab_ignores:
+            # mod = importlib.import_module(modname)  # not py26 compatible!
+            __import__(modname)  # because we don't import e.g. mne.tests w/mne
+            mod = sys.modules[modname]
+            source = getsource(mod)
+            assert_true('\t' not in source,
+                        '"%s" has tabs, please remove them or add it to the'
+                        'ignore list' % modname)
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
index f28c2e7..34e76aa 100644
--- a/mne/tests/test_epochs.py
+++ b/mne/tests/test_epochs.py
@@ -14,22 +14,27 @@ from numpy.testing import (assert_array_equal, assert_array_almost_equal,
 import numpy as np
 import copy as cp
 import warnings
+from scipy import fftpack
+import matplotlib
 
 from mne import (io, Epochs, read_events, pick_events, read_epochs,
                  equalize_channels, pick_types, pick_channels, read_evokeds,
                  write_evokeds)
-from mne.epochs import (bootstrap, equalize_epoch_counts, combine_event_ids,
-                        add_channels_epochs, EpochsArray)
-from mne.utils import (_TempDir, requires_pandas, requires_nitime,
-                       clean_warning_registry)
+from mne.epochs import (
+    bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
+    EpochsArray, concatenate_epochs, _BaseEpochs)
+from mne.utils import (_TempDir, requires_pandas, slow_test,
+                       clean_warning_registry, run_tests_if_main,
+                       requires_version)
 
 from mne.io.meas_info import create_info
 from mne.io.proj import _has_eeg_average_ref_proj
 from mne.event import merge_events
 from mne.io.constants import FIFF
-from mne.externals.six.moves import zip
-from mne.externals.six.moves import cPickle as pickle
+from mne.externals.six import text_type
+from mne.externals.six.moves import zip, cPickle as pickle
 
+matplotlib.use('Agg')  # for testing, don't use the X server
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -40,23 +45,238 @@ evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
 
 event_id, tmin, tmax = 1, -0.2, 0.5
 event_id_2 = 2
-raw = io.Raw(raw_fname, add_eeg_ref=False)
-events = read_events(event_name)
-picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
-                   ecg=True, eog=True, include=['STI 014'],
-                   exclude='bads')
+
+
+def _get_data():
+    raw = io.Raw(raw_fname, add_eeg_ref=False, proj=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                       ecg=True, eog=True, include=['STI 014'],
+                       exclude='bads')
+    return raw, events, picks
 
 reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
 flat = dict(grad=1e-15, mag=1e-15)
 
-tempdir = _TempDir()
-
 clean_warning_registry()  # really clean warning stack
 
 
+def test_reject():
+    """Test epochs rejection
+    """
+    raw, events, picks = _get_data()
+    # cull the list just to contain the relevant event
+    events = events[events[:, 2] == event_id, :]
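+    # with the reject dict used below, the first 3 epochs survive and the
+    # last 4 are dropped because 'MEG 2443' exceeds the rejection limit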
+    selection = np.arange(3)
+    drop_log = [[]] * 3 + [['MEG 2443']] * 4
+    assert_raises(TypeError, pick_types, raw)
+    picks_meg = pick_types(raw.info, meg=True, eeg=False)
+    assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks, preload=False, reject='foo')
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks_meg, preload=False, reject=dict(eeg=1.))
+    assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=picks, preload=False, reject=dict(foo=1.))
+
+    data_7 = dict()
+    keep_idx = [0, 1, 2]
+    for preload in (True, False):
+        for proj in (True, False, 'delayed'):
+            # no rejection
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            preload=preload)
+            assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events))
+            assert_array_equal(epochs.selection, np.arange(len(events)))
+            assert_array_equal(epochs.drop_log, [[]] * 7)
+            if proj not in data_7:
+                data_7[proj] = epochs.get_data()
+            assert_array_equal(epochs.get_data(), data_7[proj])
+
+            # with rejection
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=reject, preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # rejection post-hoc
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events))
+            assert_array_equal(epochs.get_data(), data_7[proj])
+            epochs.drop_bad_epochs(reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_equal(len(epochs), len(epochs.get_data()))
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # rejection twice
+            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=reject_part, preload=preload)
+            epochs.drop_bad_epochs()
+            assert_equal(len(epochs), len(events) - 1)
+            epochs.drop_bad_epochs(reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.selection, selection)
+            assert_array_equal(epochs.drop_log, drop_log)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+            # ensure that thresholds must become more stringent, not less
+            assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+            epochs.drop_bad_epochs(flat=dict(mag=1.))
+            assert_equal(len(epochs), 0)
+            assert_raises(ValueError, epochs.drop_bad_epochs,
+                          flat=dict(mag=0.))
+
+            # rejection of subset of trials (ensure array ownership)
+            reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+            epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            reject=None, preload=preload)
+            epochs = epochs[:-1]
+            epochs.drop_bad_epochs(reject=reject)
+            assert_equal(len(epochs), len(events) - 4)
+            assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
+
+
+def test_decim():
+    """Test epochs decimation
+    """
+    # First with EpochsArray
+    n_epochs, n_channels, n_times = 5, 10, 20
+    dec_1, dec_2 = 2, 3
+    decim = dec_1 * dec_2
+    sfreq = 1000.
+    sfreq_new = sfreq / decim
+    data = np.random.randn(n_epochs, n_channels, n_times)
+    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
+    info = create_info(n_channels, sfreq, 'eeg')
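+    # keep info['lowpass'] well below the post-decimation Nyquist
+    # frequency so that decimating does not trigger aliasing warnings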
+    info['lowpass'] = sfreq_new / float(decim)
+    epochs = EpochsArray(data, info, events)
+    data_epochs = epochs.decimate(decim, copy=True).get_data()
+    data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
+    assert_array_equal(data_epochs, data[:, :, ::decim])
+    assert_array_equal(data_epochs, data_epochs_2)
+
+    # Now let's do it with some real data
+    raw, events, picks = _get_data()
+    sfreq_new = raw.info['sfreq'] / decim
+    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
+    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    preload=False)
+    assert_raises(ValueError, epochs.decimate, -1)
+    expected_data = epochs.get_data()[:, :, ::decim]
+    expected_times = epochs.times[::decim]
+    for preload in (True, False):
+        # at init
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
+                        preload=preload)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # split between init and afterward
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
+                        preload=preload).decimate(dec_2)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
+                        preload=preload).decimate(dec_1)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # split between init and afterward, with preload in between
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
+                        preload=preload)
+        epochs.load_data()
+        epochs = epochs.decimate(dec_2)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+        epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
+                        preload=preload)
+        epochs.load_data()
+        epochs = epochs.decimate(dec_1)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # decimate afterward
+        epochs = Epochs(raw, events, event_id, tmin, tmax,
+                        preload=preload).decimate(decim)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+        # decimate afterward, with preload in between
+        epochs = Epochs(raw, events, event_id, tmin, tmax,
+                        preload=preload)
+        epochs.load_data()
+        epochs.decimate(decim)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_allclose(epochs.get_data(), expected_data)
+        assert_equal(epochs.info['sfreq'], sfreq_new)
+        assert_array_equal(epochs.times, expected_times)
+
+
+def test_base_epochs():
+    """Test base epochs class
+    """
+    raw = _get_data()[0]
+    epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
+                         event_id, tmin, tmax)
+    assert_raises(NotImplementedError, epochs.get_data)
+    # events with non integers
+    assert_raises(ValueError, _BaseEpochs, raw.info, None,
+                  np.ones((1, 3), float), event_id, tmin, tmax)
+    assert_raises(ValueError, _BaseEpochs, raw.info, None,
+                  np.ones((1, 3, 2), int), event_id, tmin, tmax)
+
+
+ at requires_version('scipy', '0.14')
+def test_savgol_filter():
+    """Test savgol filtering
+    """
+    h_freq = 10.
+    raw, events = _get_data()[:2]
+    epochs = Epochs(raw, events, event_id, tmin, tmax)
+    assert_raises(RuntimeError, epochs.savgol_filter, 10.)
+    epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
+    freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
+    data = np.abs(fftpack.fft(epochs.get_data()))
+    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
+    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
+    epochs.savgol_filter(h_freq)
+    data_filt = np.abs(fftpack.fft(epochs.get_data()))
+    # decent in pass-band
+    assert_allclose(np.mean(data[:, :, match_mask], 0),
+                    np.mean(data_filt[:, :, match_mask], 0),
+                    rtol=1e-4, atol=1e-2)
+    # suppression in stop-band
+    assert_true(np.mean(data[:, :, mismatch_mask]) >
+                np.mean(data_filt[:, :, mismatch_mask]) * 5)
+
+
 def test_epochs_hash():
     """Test epoch hashing
     """
+    raw, events = _get_data()[:2]
     epochs = Epochs(raw, events, event_id, tmin, tmax)
     assert_raises(RuntimeError, epochs.__hash__)
     epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
@@ -72,6 +292,7 @@ def test_epochs_hash():
 
 def test_event_ordering():
     """Test event order"""
+    raw, events = _get_data()[:2]
     events2 = events.copy()
     np.random.shuffle(events2)
     for ii, eve in enumerate([events, events2]):
@@ -87,6 +308,7 @@ def test_event_ordering():
 def test_epochs_bad_baseline():
     """Test Epochs initialization with bad baseline parameters
     """
+    raw, events = _get_data()[:2]
     assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
     assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
 
@@ -94,19 +316,33 @@ def test_epochs_bad_baseline():
 def test_epoch_combine_ids():
     """Test combining event ids in epochs compared to events
     """
-    for preload in [False]:
-        epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
-                                      'd': 4, 'e': 5, 'f': 32},
-                        tmin, tmax, picks=picks, preload=preload)
-        events_new = merge_events(events, [1, 2], 12)
-        epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
-        assert_array_equal(events_new, epochs_new.events)
-        # should probably add test + functionality for non-replacement XXX
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
+                                  'd': 4, 'e': 5, 'f': 32},
+                    tmin, tmax, picks=picks, preload=False)
+    events_new = merge_events(events, [1, 2], 12)
+    epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
+    assert_equal(epochs_new['ab'].name, 'ab')
+    assert_array_equal(events_new, epochs_new.events)
+    # should probably add test + functionality for non-replacement XXX
+
+
+def test_epoch_multi_ids():
+    """Test epoch selection via multiple/partial keys
+    """
+    raw, events, picks = _get_data()
+    epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
+                                  'b/d': 4, 'a_b': 5},
+                    tmin, tmax, picks=picks, preload=False)
+    epochs_regular = epochs[['a', 'b']]
+    epochs_multi = epochs[['a/b/a', 'a/b/b']]
+    assert_array_equal(epochs_regular.events, epochs_multi.events)
 
 
 def test_read_epochs_bad_events():
     """Test epochs when events are at the beginning or the end of the file
     """
+    raw, events, picks = _get_data()
     # Event at the beginning
     epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                     event_id, tmin, tmax, picks=picks, baseline=(None, 0))
@@ -115,7 +351,9 @@ def test_read_epochs_bad_events():
 
     epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
                     event_id, tmin, tmax, picks=picks, baseline=(None, 0))
+    assert_true(repr(epochs))  # test repr
     epochs.drop_bad_epochs()
+    assert_true(repr(epochs))
     with warnings.catch_warnings(record=True):
         evoked = epochs.average()
 
@@ -129,14 +367,28 @@ def test_read_epochs_bad_events():
     warnings.resetwarnings()
 
 
+@slow_test
 def test_read_write_epochs():
     """Test epochs from raw files with IO as fif file
     """
+    raw, events, picks = _get_data()
+    tempdir = _TempDir()
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
+    baseline = (None, 0)
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0))
+                    baseline=baseline, preload=True)
+    epochs_orig = epochs.copy()
+    epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                          baseline=None, preload=True)
+    assert_true(epochs_no_bl.baseline is None)
     evoked = epochs.average()
     data = epochs.get_data()
 
+    # Bad tmin/tmax parameters
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
+                  baseline=None)
+
     epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
                           None, tmin, tmax, picks=picks,
                           baseline=(None, 0))
@@ -146,23 +398,34 @@ def test_read_write_epochs():
                            eog=True, exclude='bads')
     eog_ch_names = [raw.ch_names[k] for k in eog_picks]
     epochs.drop_channels(eog_ch_names)
-    assert_true(len(epochs.info['chs']) == len(epochs.ch_names)
-                == epochs.get_data().shape[1])
+    assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
+                epochs.get_data().shape[1])
     data_no_eog = epochs.get_data()
     assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
 
     # test decim kwarg
     with warnings.catch_warnings(record=True) as w:
+        # decim with lowpass
         warnings.simplefilter('always')
         epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                             baseline=(None, 0), decim=4)
         assert_equal(len(w), 1)
 
+        # decim without lowpass
+        lowpass = raw.info['lowpass']
+        raw.info['lowpass'] = None
+        epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                            baseline=(None, 0), decim=4)
+        assert_equal(len(w), 2)
+        raw.info['lowpass'] = lowpass
+
     data_dec = epochs_dec.get_data()
-    assert_array_equal(data[:, :, epochs_dec._decim_idx], data_dec)
+    assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
+                    atol=1e-12)
 
     evoked_dec = epochs_dec.average()
-    assert_array_equal(evoked.data[:, epochs_dec._decim_idx], evoked_dec.data)
+    assert_allclose(evoked.data[:, epochs_dec._decim_slice],
+                    evoked_dec.data, rtol=1e-12)
 
     n = evoked.data.shape[1]
     n_dec = evoked_dec.data.shape[1]
@@ -170,68 +433,148 @@ def test_read_write_epochs():
     assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
     assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
 
-    # test IO
-    epochs.save(op.join(tempdir, 'test-epo.fif'))
-    epochs_read = read_epochs(op.join(tempdir, 'test-epo.fif'))
-
-    assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
-    assert_array_equal(epochs_read.times, epochs.times)
-    assert_array_almost_equal(epochs_read.average().data, evoked.data)
-    assert_equal(epochs_read.proj, epochs.proj)
-    bmin, bmax = epochs.baseline
-    if bmin is None:
-        bmin = epochs.times[0]
-    if bmax is None:
-        bmax = epochs.times[-1]
-    baseline = (bmin, bmax)
-    assert_array_almost_equal(epochs_read.baseline, baseline)
-    assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
-    assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
-    assert_equal(epochs_read.event_id, epochs.event_id)
-
-    epochs.event_id.pop('1')
-    epochs.event_id.update({'a:a': 1})  # test allow for ':' in key
-    epochs.save(op.join(tempdir, 'foo-epo.fif'))
-    epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
-    assert_equal(epochs_read2.event_id, epochs.event_id)
-
-    # add reject here so some of the epochs get dropped
-    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), reject=reject)
-    epochs.save(op.join(tempdir, 'test-epo.fif'))
-    # ensure bad events are not saved
-    epochs_read3 = read_epochs(op.join(tempdir, 'test-epo.fif'))
-    assert_array_equal(epochs_read3.events, epochs.events)
-    data = epochs.get_data()
-    assert_true(epochs_read3.events.shape[0] == data.shape[0])
-
-    # test copying loaded one (raw property)
-    epochs_read4 = epochs_read3.copy()
-    assert_array_almost_equal(epochs_read4.get_data(), data)
-    # test equalizing loaded one (drop_log property)
-    epochs_read4.equalize_event_counts(epochs.event_id)
+    # Test event access on non-preloaded data (#2345)
+
+    # due to reapplication of the proj matrix, this is our quality limit
+    # for some tests
+    tols = dict(atol=1e-3, rtol=1e-20)
+
+    raw, events, picks = _get_data()
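+    # tweak the trigger values so that both conditions 'a' (1) and 'b' (2)
+    # are present in the events array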
+    events[::2, 1] = 1
+    events[1::2, 2] = 2
+    event_ids = dict(a=1, b=2)
+    for proj in (True, 'delayed', False):
+        epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
+                        baseline=(None, 0), proj=proj, reject=reject,
+                        add_eeg_ref=True)
+        data1 = epochs.get_data()
+        data2 = epochs.apply_proj().get_data()
+        assert_allclose(data1, data2, **tols)
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=False)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_allclose(epochs['a'].get_data(),
+                        epochs_read['a'].get_data(), **tols)
+        assert_allclose(epochs['b'].get_data(),
+                        epochs_read['b'].get_data(), **tols)
+
+    # ensure we don't leak file descriptors
+    epochs_read = read_epochs(temp_fname, preload=False)
+    epochs_copy = epochs_read.copy()
+    del epochs_read
+    epochs_copy.get_data()
+    with warnings.catch_warnings(record=True) as w:
+        del epochs_copy
+    assert_equal(len(w), 0)
 
-    epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
-    epochs.save('test-epo.fif')
-    epochs_read5 = read_epochs('test-epo.fif')
-    assert_array_equal(epochs_read5.selection, epochs.selection)
-    assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
+    # test IO
+    for preload in (False, True):
+        epochs = epochs_orig.copy()
+        epochs.save(temp_fname)
+        epochs_no_bl.save(temp_fname_no_bl)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        epochs_no_bl_read = read_epochs(temp_fname_no_bl)
+        assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
+        epochs_no_bl_read.apply_baseline(baseline)
+        assert_true(epochs_no_bl_read.baseline == baseline)
+        assert_true(str(epochs_read).startswith('<Epochs'))
+
+        assert_array_equal(epochs_no_bl_read.times, epochs.times)
+        assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
+        assert_array_almost_equal(epochs.get_data(),
+                                  epochs_no_bl_read.get_data())
+        assert_array_equal(epochs_read.times, epochs.times)
+        assert_array_almost_equal(epochs_read.average().data, evoked.data)
+        assert_equal(epochs_read.proj, epochs.proj)
+        bmin, bmax = epochs.baseline
+        if bmin is None:
+            bmin = epochs.times[0]
+        if bmax is None:
+            bmax = epochs.times[-1]
+        baseline = (bmin, bmax)
+        assert_array_almost_equal(epochs_read.baseline, baseline)
+        assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
+        assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
+        assert_equal(epochs_read.event_id, epochs.event_id)
+
+        epochs.event_id.pop('1')
+        epochs.event_id.update({'a:a': 1})  # test allow for ':' in key
+        epochs.save(op.join(tempdir, 'foo-epo.fif'))
+        epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'),
+                                   preload=preload)
+        assert_equal(epochs_read2.event_id, epochs.event_id)
+
+        # add reject here so some of the epochs get dropped
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), reject=reject)
+        epochs.save(temp_fname)
+        # ensure bad events are not saved
+        epochs_read3 = read_epochs(temp_fname, preload=preload)
+        assert_array_equal(epochs_read3.events, epochs.events)
+        data = epochs.get_data()
+        assert_true(epochs_read3.events.shape[0] == data.shape[0])
+
+        # test copying loaded one (raw property)
+        epochs_read4 = epochs_read3.copy()
+        assert_array_almost_equal(epochs_read4.get_data(), data)
+        # test equalizing loaded one (drop_log property)
+        epochs_read4.equalize_event_counts(epochs.event_id)
+
+        epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
+        epochs.save(temp_fname)
+        epochs_read5 = read_epochs(temp_fname, preload=preload)
+        assert_array_equal(epochs_read5.selection, epochs.selection)
+        assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
+        assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
 
-    # Test that one can drop channels on read file
-    epochs_read5.drop_channels(epochs_read5.ch_names[:1])
+        if preload:
+            # Test that one can drop channels on read file
+            epochs_read5.drop_channels(epochs_read5.ch_names[:1])
 
-    # test warnings on bad filenames
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
-        epochs.save(epochs_badname)
-        read_epochs(epochs_badname)
-    assert_true(len(w) == 2)
+        # test warnings on bad filenames
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
+            epochs.save(epochs_badname)
+            read_epochs(epochs_badname, preload=preload)
+        assert_true(len(w) == 2)
+
+        # test loading epochs with missing events
+        epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax,
+                        picks=picks, on_missing='ignore')
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_array_equal(epochs.events, epochs_read.events)
+        assert_equal(set(epochs.event_id.keys()),
+                     set(text_type(x) for x in epochs_read.event_id.keys()))
+
+        # test saving split epoch files
+        epochs.save(temp_fname, split_size='7MB')
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
+        assert_array_equal(epochs.events, epochs_read.events)
+        assert_array_equal(epochs.selection, epochs_read.selection)
+        assert_equal(epochs.drop_log, epochs_read.drop_log)
+
+        # Test that having a single time point works
+        epochs.load_data()
+        epochs.crop(0, 0, copy=False)
+        assert_equal(len(epochs.times), 1)
+        assert_equal(epochs.get_data().shape[-1], 1)
+        epochs.save(temp_fname)
+        epochs_read = read_epochs(temp_fname, preload=preload)
+        assert_equal(len(epochs_read.times), 1)
+        assert_equal(epochs_read.get_data().shape[-1], 1)
 
 
 def test_epochs_proj():
     """Test handling projection (apply proj in Raw or in Epochs)
     """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
     exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
     this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
                             eog=True, exclude=exclude)
@@ -264,10 +607,33 @@ def test_epochs_proj():
                     baseline=(None, 0), proj=True, add_eeg_ref=False)
     assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
 
+    # make sure we don't add avg ref when a custom ref has been applied
+    raw.info['custom_ref_applied'] = True
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=True)
+    assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
+
+    # From GH#2200:
+    # This has no problem
+    proj = raw.info['projs']
+    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
+                    baseline=(None, 0), proj=False)
+    epochs.info['projs'] = []
+    data = epochs.copy().add_proj(proj).apply_proj().get_data()
+    # save and reload data
+    fname_epo = op.join(tempdir, 'temp-epo.fif')
+    epochs.save(fname_epo)  # Save without proj added
+    epochs_read = read_epochs(fname_epo)
+    epochs_read.add_proj(proj)
+    epochs_read.apply_proj()  # This used to bomb
+    data_2 = epochs_read.get_data()  # Let's check the result
+    assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
+
 
 def test_evoked_arithmetic():
     """Test arithmetic of evoked data
     """
+    raw, events, picks = _get_data()
     epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                      baseline=(None, 0))
     evoked1 = epochs1.average()
@@ -288,6 +654,8 @@ def test_evoked_arithmetic():
 def test_evoked_io_from_epochs():
     """Test IO of evoked data made from epochs
     """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
     # offset our tmin so we don't get exactly a zero value when decimating
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
@@ -326,6 +694,8 @@ def test_evoked_io_from_epochs():
 def test_evoked_standard_error():
     """Test calculation and read/write of standard error
     """
+    raw, events, picks = _get_data()
+    tempdir = _TempDir()
     epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
     evoked = [epochs.average(), epochs.standard_error()]
@@ -354,6 +724,7 @@ def test_evoked_standard_error():
 def test_reject_epochs():
     """Test of epochs rejection
     """
+    raw, events, picks = _get_data()
     events1 = events[events[:, 2] == event_id]
     epochs = Epochs(raw, events1,
                     event_id, tmin, tmax, baseline=(None, 0),
@@ -378,8 +749,16 @@ def test_reject_epochs():
                     reject=reject_crazy, flat=flat)
     epochs.drop_bad_epochs()
 
-    assert_true(all(['MEG 2442' in e for e in epochs.drop_log]))
-    assert_true(all(['MEG 2443' not in e for e in epochs.drop_log]))
+    assert_true(all('MEG 2442' in e for e in epochs.drop_log))
+    assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
+
+    # Invalid reject_tmin/reject_tmax/detrend
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=1., reject_tmax=0)
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=tmin - 1, reject_tmax=1.)
+    assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
+                  reject_tmin=0., reject_tmax=tmax + 1)
 
     epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=reject, flat=flat,
@@ -391,10 +770,19 @@ def test_reject_epochs():
     assert_true(epochs.times[epochs._reject_time][0] >= 0.)
     assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
 
+    # Invalid data for _is_good_epoch function
+    epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
+    assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
+    assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
+                 (False, ['TOO_SHORT']))
+    data = epochs[0].get_data()[0]
+    assert_equal(epochs._is_good_epoch(data), (True, None))
+
 
 def test_preload_epochs():
     """Test preload of epochs
     """
+    raw, events, picks = _get_data()
     epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
                             picks=picks, baseline=(None, 0), preload=True,
                             reject=reject, flat=flat)
@@ -412,6 +800,7 @@ def test_preload_epochs():
 def test_indexing_slicing():
     """Test of indexing and slicing operations
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=False,
                     reject=reject, flat=flat)
@@ -469,6 +858,7 @@ def test_indexing_slicing():
 def test_comparision_with_c():
     """Test of average obtained vs C code
     """
+    raw, events = _get_data()[:2]
     c_evoked = read_evokeds(evoked_nf_name, condition=0)
     epochs = Epochs(raw, events, event_id, tmin, tmax,
                     baseline=None, preload=True,
@@ -486,14 +876,19 @@ def test_comparision_with_c():
 def test_crop():
     """Test of crop of epochs
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=False,
                     reject=reject, flat=flat)
+    assert_raises(RuntimeError, epochs.crop, None, 0.2)  # not preloaded
     data_normal = epochs.get_data()
 
     epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
                      picks=picks, baseline=(None, 0), preload=True,
                      reject=reject, flat=flat)
+    with warnings.catch_warnings(record=True) as w:
+        epochs2.crop(-20, 200)
+    assert_true(len(w) == 2)
 
     # indices for slicing
     tmin_window = tmin + 0.1
@@ -508,17 +903,35 @@ def test_crop():
     assert_array_equal(data2, data_normal[:, :, tmask])
     assert_array_equal(data3, data_normal[:, :, tmask])
 
+    # test time info is correct
+    epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
+                         np.ones((1, 3), int), tmin=-0.2)
+    epochs.crop(-.200, .700)
+    last_time = epochs.times[-1]
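+    # after cropping, the last sample index (900) is a multiple of 10, so
+    # decimating by 10 must preserve the final time point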
+    with warnings.catch_warnings(record=True):  # not LP filtered
+        epochs.decimate(10)
+    assert_allclose(last_time, epochs.times[-1])
+
 
 def test_resample():
     """Test of resample of epochs
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True,
+                    baseline=(None, 0), preload=False,
                     reject=reject, flat=flat)
+    assert_raises(RuntimeError, epochs.resample, 100)
+
+    epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
+                      baseline=(None, 0), preload=True,
+                      reject=reject, flat=flat)
+    epochs = epochs_o.copy()
+
     data_normal = cp.deepcopy(epochs.get_data())
     times_normal = cp.deepcopy(epochs.times)
     sfreq_normal = epochs.info['sfreq']
     # upsample by 2
+    epochs = epochs_o.copy()
     epochs.resample(sfreq_normal * 2, npad=0)
     data_up = cp.deepcopy(epochs.get_data())
     times_up = cp.deepcopy(epochs.times)
@@ -537,16 +950,23 @@ def test_resample():
     assert_array_almost_equal(data_new, data_normal, 5)
 
     # use parallel
-    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True,
-                    reject=reject, flat=flat)
+    epochs = epochs_o.copy()
     epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
     assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
 
+    # test copy flag
+    epochs = epochs_o.copy()
+    epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
+    assert_true(epochs_resampled is not epochs)
+    epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
+    assert_true(epochs_resampled is epochs)
+
 
 def test_detrend():
     """Test detrending of epochs
     """
+    raw, events, picks = _get_data()
+
     # test first-order
     epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                       baseline=None, detrend=1)
@@ -575,10 +995,14 @@ def test_detrend():
         # There are non-M/EEG channels that should not be equal:
         assert_true(not np.allclose(a, b))
 
+    assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
+                  detrend=2)
+
 
 def test_bootstrap():
     """Test of bootstrapping of epochs
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True,
                     reject=reject, flat=flat)
@@ -590,6 +1014,7 @@ def test_bootstrap():
 def test_epochs_copy():
     """Test copy epochs
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), preload=True,
                     reject=reject, flat=flat)
@@ -608,6 +1033,7 @@ def test_epochs_copy():
 def test_iter_evoked():
     """Test the iterator for epochs -> evoked
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
 
@@ -620,6 +1046,7 @@ def test_iter_evoked():
 def test_subtract_evoked():
     """Test subtraction of Evoked from Epochs
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
 
@@ -646,39 +1073,13 @@ def test_subtract_evoked():
     # if we compute the evoked response after subtracting it we get zero
     zero_evoked = epochs.average()
     data = zero_evoked.data
-    assert_array_almost_equal(data, np.zeros_like(data), decimal=20)
-
-
-@requires_nitime
-def test_epochs_to_nitime():
-    """Test test_to_nitime
-    """
-    epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0), preload=True,
-                    reject=reject, flat=flat)
-
-    picks2 = [0, 3]
-
-    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
-                                 collapse=True, copy=True)
-    assert_true(epochs_ts.ch_names == epochs.ch_names)
-
-    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
-                                 collapse=True, copy=True)
-    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])
-
-    epochs_ts = epochs.to_nitime(picks=None, epochs_idx=[0],
-                                 collapse=False, copy=False)
-    assert_true(epochs_ts.ch_names == epochs.ch_names)
-
-    epochs_ts = epochs.to_nitime(picks=picks2, epochs_idx=None,
-                                 collapse=False, copy=False)
-    assert_true(epochs_ts.ch_names == [epochs.ch_names[k] for k in picks2])
+    assert_allclose(data, np.zeros_like(data), atol=1e-15)
 
 
 def test_epoch_eq():
     """Test epoch count equalization and condition combining
     """
+    raw, events, picks = _get_data()
     # equalizing epochs objects
     epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
     epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
@@ -753,10 +1154,24 @@ def test_epoch_eq():
     assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
     assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
 
+    # equalizing with hierarchical tags
+    epochs = Epochs(raw, events, {'a/x': 1, 'b/x': 2, 'a/y': 3, 'b/y': 4},
+                    tmin, tmax, picks=picks, reject=reject)
+    cond1, cond2 = ['a', ['b/x', 'b/y']], [['a/x', 'a/y'], 'b']
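+    # 'a' expands to ['a/x', 'a/y'] and 'b' to ['b/x', 'b/y'], so both specs
+    # describe the same split and must equalize to identical event sets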
+    es = [epochs.equalize_event_counts(c)[0] for c in (cond1, cond2)]
+    assert_array_equal(es[0].events[:, 0], es[1].events[:, 0])
+    cond1, cond2 = ['a', ['b', 'b/y']], [['a/x', 'a/y'], 'x']
+    for c in (cond1, cond2):  # error b/c tag and id mix/non-orthogonal tags
+        assert_raises(ValueError, epochs.equalize_event_counts, c)
+
 
 def test_access_by_name():
     """Test accessing epochs by event name and on_missing for rare events
     """
+    tempdir = _TempDir()
+    raw, events, picks = _get_data()
+
+    # Test various invalid inputs
     assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
                   tmax, picks=picks)
     assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
@@ -765,6 +1180,9 @@ def test_access_by_name():
                   tmin, tmax, picks=picks)
     assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
                   picks=picks)
+    assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
+                  picks=picks)
+
     # Test accessing non-existent events (assumes 12345678 does not exist)
     event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
     assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
@@ -779,6 +1197,12 @@ def test_access_by_name():
         assert_true(1 <= nw <= 2)
         Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
         assert_equal(len(w), nw)
+
+    # Test constructing epochs with a list of ints as events
+    epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
+    for k, v in epochs.event_id.items():
+        assert_equal(int(k), v)
+
     epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
     assert_raises(KeyError, epochs.__getitem__, 'bar')
 
@@ -789,8 +1213,9 @@ def test_access_by_name():
     epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
                     preload=True)
     assert_raises(KeyError, epochs.__getitem__, 'bar')
-    epochs.save(op.join(tempdir, 'test-epo.fif'))
-    epochs2 = read_epochs(op.join(tempdir, 'test-epo.fif'))
+    temp_fname = op.join(tempdir, 'test-epo.fif')
+    epochs.save(temp_fname)
+    epochs2 = read_epochs(temp_fname)
 
     for ep in [epochs, epochs2]:
         data = ep['a'].get_data()
@@ -814,29 +1239,44 @@ def test_access_by_name():
     assert_array_equal(epochs.events, epochs6.events)
     assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
 
+    # Make sure we preserve names
+    assert_equal(epochs['a'].name, 'a')
+    assert_equal(epochs[['a', 'b']]['a'].name, 'a')
+
 
 @requires_pandas
-def test_as_data_frame():
+def test_to_data_frame():
     """Test epochs Pandas exporter"""
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
-    assert_raises(ValueError, epochs.as_data_frame, index=['foo', 'bar'])
-    assert_raises(ValueError, epochs.as_data_frame, index='qux')
-    assert_raises(ValueError, epochs.as_data_frame, np.arange(400))
-    df = epochs.as_data_frame()
+    assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
+    assert_raises(ValueError, epochs.to_data_frame, index='qux')
+    assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
+
+    df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
+                              picks=list(range(epochs.info['nchan'])))
+
+    # Default index and picks
+    df2 = epochs.to_data_frame()
+    assert_equal(df.index.names, df2.index.names)
+    assert_array_equal(df.columns.values, epochs.ch_names)
+
     data = np.hstack(epochs.get_data())
     assert_true((df.columns == epochs.ch_names).all())
     assert_array_equal(df.values[:, 0], data[0] * 1e13)
     assert_array_equal(df.values[:, 2], data[2] * 1e15)
     for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
-        df = epochs.as_data_frame(index=ind)
+        df = epochs.to_data_frame(index=ind)
         assert_true(df.index.names == ind if isinstance(ind, list) else [ind])
         # test that non-indexed data were present as categorical variables
-        df.reset_index().columns[:3] == ['condition', 'epoch', 'time']
+        assert_array_equal(sorted(df.reset_index().columns[:3]),
+                           sorted(['time', 'condition', 'epoch']))
 
 
 def test_epochs_proj_mixin():
     """Test SSP proj methods from ProjMixin class
     """
+    raw, events, picks = _get_data()
     for proj in [True, False]:
         epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), proj=proj)
@@ -865,19 +1305,17 @@ def test_epochs_proj_mixin():
     # wrong proj argument
     assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
                   picks=picks, baseline=(None, 0), proj='crazy')
-    # delayed without reject params
-    assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
-                  picks=picks, baseline=(None, 0), proj='delayed', reject=None)
 
     for preload in [True, False]:
         epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), proj='delayed', preload=preload,
-                        add_eeg_ref=True, verbose=True, reject=reject)
+                        add_eeg_ref=True, reject=reject)
         epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                          baseline=(None, 0), proj=True, preload=preload,
                          add_eeg_ref=True, reject=reject)
+
         assert_allclose(epochs.copy().apply_proj().get_data()[0],
-                        epochs2.get_data()[0])
+                        epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
 
         # make sure data output is constant across repeated calls
         # e.g. drop bads
@@ -902,9 +1340,41 @@ def test_epochs_proj_mixin():
     assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
 
 
+def test_delayed_epochs():
+    """Test delayed projection
+    """
+    raw, events, picks = _get_data()
+    events = events[:10]
+    picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
+                            pick_types(raw.info, meg=False, eeg=False,
+                                       ecg=True, eog=True)])
+    picks = np.sort(picks)
+    raw.info['lowpass'] = 40.  # fake the LP info so no warnings
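+    # for every preload/proj/decim combination, the average of the epochs
+    # must match the mean over the (projected) epoch data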
+    for preload in (True, False):
+        for proj in (True, False, 'delayed'):
+            for decim in (1, 3):
+                for ii in range(2):
+                    epochs = Epochs(raw, events, event_id, tmin, tmax,
+                                    picks=picks, proj=proj, reject=reject,
+                                    preload=preload, decim=decim)
+                    if ii == 1:
+                        epochs.load_data()
+                    picks_data = pick_types(epochs.info, meg=True, eeg=True)
+                    evoked = epochs.average(picks=picks_data)
+                    if proj is True:
+                        evoked.apply_proj()
+                    epochs_data = epochs.get_data().mean(axis=0)[picks_data]
+                    assert_array_equal(evoked.ch_names,
+                                       np.array(epochs.ch_names)[picks_data])
+                    assert_allclose(evoked.times, epochs.times)
+                    assert_allclose(evoked.data, epochs_data,
+                                    rtol=1e-5, atol=1e-15)
+
+
 def test_drop_epochs():
     """Test dropping of epochs.
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
     events1 = events[events[:, 2] == event_id]
@@ -935,6 +1405,7 @@ def test_drop_epochs():
 
 def test_drop_epochs_mult():
     """Test that subselecting epochs or making less epochs is equivalent"""
+    raw, events, picks = _get_data()
     for preload in [True, False]:
         epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
                          tmin, tmax, picks=picks, reject=reject,
@@ -964,6 +1435,7 @@ def test_drop_epochs_mult():
 
 def test_contains():
     """Test membership API"""
+    raw, events = _get_data()[:2]
 
     tests = [(('mag', False), ('grad', 'eeg')),
              (('grad', False), ('mag', 'eeg')),
@@ -985,9 +1457,10 @@ def test_contains():
 def test_drop_channels_mixin():
     """Test channels-dropping functionality
     """
+    raw, events = _get_data()[:2]
     # here without picks to get additional coverage
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
-                    baseline=(None, 0))
+                    baseline=(None, 0), preload=True)
     drop_ch = epochs.ch_names[:3]
     ch_names = epochs.ch_names[3:]
 
@@ -1005,10 +1478,13 @@ def test_drop_channels_mixin():
 def test_pick_channels_mixin():
     """Test channel-picking functionality
     """
+    raw, events, picks = _get_data()
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                    baseline=(None, 0))
+                    baseline=(None, 0), preload=True)
     ch_names = epochs.ch_names[:3]
-
+    epochs.preload = False
+    assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
+    epochs.preload = True
     ch_names_orig = epochs.ch_names
     dummy = epochs.pick_channels(ch_names, copy=True)
     assert_equal(ch_names, dummy.ch_names)
@@ -1019,12 +1495,17 @@ def test_pick_channels_mixin():
     assert_equal(ch_names, epochs.ch_names)
     assert_equal(len(ch_names), epochs.get_data().shape[1])
 
+    # Invalid picks
+    assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
+                  picks=[])
+
 
 def test_equalize_channels():
     """Test equalization of channels
     """
+    raw, events, picks = _get_data()
     epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
-                     baseline=(None, 0), proj=False)
+                     baseline=(None, 0), proj=False, preload=True)
     epochs2 = epochs1.copy()
     ch_names = epochs1.ch_names[2:]
     epochs1.drop_channels(epochs1.ch_names[:1])
@@ -1037,6 +1518,7 @@ def test_equalize_channels():
 
 def test_illegal_event_id():
     """Test handling of invalid events ids"""
+    raw, events, picks = _get_data()
     event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
 
     assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
@@ -1045,32 +1527,36 @@ def test_illegal_event_id():
 
 def test_add_channels_epochs():
     """Test adding channels"""
+    raw, events, picks = _get_data()
 
-    def make_epochs(picks):
+    def make_epochs(picks, proj):
         return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
-                      reject=None, preload=True, proj=False, picks=picks)
+                      reject=None, preload=True, proj=proj, picks=picks)
 
     picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
     picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
     picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
 
-    epochs = make_epochs(picks=picks)
-    epochs_meg = make_epochs(picks=picks_meg)
-    epochs_eeg = make_epochs(picks=picks_eeg)
+    for proj in (False, True):
+        epochs = make_epochs(picks=picks, proj=proj)
+        epochs_meg = make_epochs(picks=picks_meg, proj=proj)
+        epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
+        epochs.info._check_consistency()
+        epochs_meg.info._check_consistency()
+        epochs_eeg.info._check_consistency()
 
-    epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
+        epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
 
-    assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
-    assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
+        assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
+        assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
 
-    data1 = epochs.get_data()
-    data2 = epochs2.get_data()
-    data3 = np.concatenate([e.get_data() for e in
-                            [epochs_meg, epochs_eeg]], axis=1)
-    assert_array_equal(data1.shape, data2.shape)
-    assert_array_equal(data1, data3)  # XXX unrelated bug? this crashes
-                                      # when proj == True
-    assert_array_equal(data1, data2)
+        data1 = epochs.get_data()
+        data2 = epochs2.get_data()
+        data3 = np.concatenate([e.get_data() for e in
+                                [epochs_meg, epochs_eeg]], axis=1)
+        assert_array_equal(data1.shape, data2.shape)
+        assert_allclose(data1, data3, atol=1e-25)
+        assert_allclose(data1, data2, atol=1e-25)
 
     epochs_meg2 = epochs_meg.copy()
     epochs_meg2.info['meas_date'] += 10
@@ -1089,6 +1575,8 @@ def test_add_channels_epochs():
                   [epochs_meg, epochs_eeg[:2]])
 
     epochs_meg.info['chs'].pop(0)
+    epochs_meg.info['ch_names'].pop(0)
+    epochs_meg.info['nchan'] -= 1
     assert_raises(RuntimeError, add_channels_epochs,
                   [epochs_meg, epochs_eeg])
 
@@ -1104,6 +1592,7 @@ def test_add_channels_epochs():
 
     epochs_meg2 = epochs_meg.copy()
     epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
+    epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
     assert_raises(ValueError, add_channels_epochs,
                   [epochs_meg2, epochs_eeg])
 
@@ -1151,6 +1640,8 @@ def test_add_channels_epochs():
 def test_array_epochs():
     """Test creating epochs from array
     """
+    import matplotlib.pyplot as plt
+    tempdir = _TempDir()
 
     # creating
     rng = np.random.RandomState(42)
@@ -1160,11 +1651,16 @@ def test_array_epochs():
     types = ['eeg'] * 20
     info = create_info(ch_names, sfreq, types)
     events = np.c_[np.arange(1, 600, 60),
-                   np.zeros(10),
+                   np.zeros(10, int),
                    [1, 2] * 5]
     event_id = {'a': 1, 'b': 2}
-    epochs = EpochsArray(data, info, events=events, event_id=event_id,
-                         tmin=-.2)
+    epochs = EpochsArray(data, info, events, tmin, event_id)
+    assert_true(str(epochs).startswith('<EpochsArray'))
+    # From GH#1963
+    assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
+                  event_id)
+    assert_raises(ValueError, EpochsArray, data, info, events, tmin,
+                  dict(a=1))
 
     # saving
     temp_fname = op.join(tempdir, 'test-epo.fif')
@@ -1177,9 +1673,8 @@ def test_array_epochs():
     assert_array_equal(epochs.events, epochs2.events)
 
     # plotting
-    import matplotlib
-    matplotlib.use('Agg')  # for testing don't use X server
     epochs[0].plot()
+    plt.close('all')
 
     # indexing
     assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
@@ -1193,4 +1688,106 @@ def test_array_epochs():
                          reject_tmin=0.1, reject_tmax=0.2)
     assert_equal(len(epochs), len(events) - 2)
     assert_equal(epochs.drop_log[0], ['EEG 006'])
-    assert_equal(len(events), len(epochs.selection))
+    assert_equal(len(epochs.drop_log), 10)
+    assert_equal(len(epochs.events), len(epochs.selection))
+
+    # baseline
+    data = np.ones((10, 20, 300))
+    epochs = EpochsArray(data, info, events=events, event_id=event_id,
+                         tmin=-.2, baseline=(None, 0))
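+    # constant data minus its (constant) baseline mean is identically zero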
+    ep_data = epochs.get_data()
+    assert_array_equal(np.zeros_like(ep_data), ep_data)
+
+    # one time point
+    epochs = EpochsArray(data[:, :, :1], info, events=events,
+                         event_id=event_id, tmin=0., baseline=None)
+    assert_allclose(epochs.times, [0.])
+    assert_allclose(epochs.get_data(), data[:, :, :1])
+    epochs.save(temp_fname)
+    epochs_read = read_epochs(temp_fname)
+    assert_allclose(epochs_read.times, [0.])
+    assert_allclose(epochs_read.get_data(), data[:, :, :1])
+
+    # event as integer (#2435)
+    mask = (events[:, 2] == 1)
+    data_1 = data[mask]
+    events_1 = events[mask]
+    epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
+                         tmin=-0.2, baseline=(None, 0))
+
+
+def test_concatenate_epochs():
+    """Test concatenate epochs"""
+    raw, events, picks = _get_data()
+    epochs = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks)
+    epochs2 = epochs.copy()
+    epochs_list = [epochs, epochs2]
+    epochs_conc = concatenate_epochs(epochs_list)
+    assert_array_equal(
+        epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
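+    # event samples are shifted during concatenation so that they stay
+    # unique and strictly increasing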
+
+    expected_shape = list(epochs.get_data().shape)
+    expected_shape[0] *= 2
+    expected_shape = tuple(expected_shape)
+
+    assert_equal(epochs_conc.get_data().shape, expected_shape)
+    assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
+
+    epochs2 = epochs.copy()
+    epochs2._data = epochs2.get_data()
+    epochs2.preload = True
+    assert_raises(
+        ValueError, concatenate_epochs,
+        [epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
+
+    epochs2.times = np.delete(epochs2.times, 1)
+    assert_raises(
+        ValueError,
+        concatenate_epochs, [epochs, epochs2])
+
+    assert_equal(epochs_conc._raw, None)
+
+    # check if baseline is same for all epochs
+    epochs2.baseline = (-0.1, None)
+    assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
+
+
+def test_add_channels():
+    """Test epoch splitting / re-appending channel types
+    """
+    raw, events, picks = _get_data()
+    epoch_nopre = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks)
+    epoch = Epochs(
+        raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
+        picks=picks, preload=True)
+    epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
+    epoch_meg = epoch.pick_types(meg=True, copy=True)
+    epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
+    epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
+    epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
+    assert_true(all(ch in epoch_new.ch_names
+                    for ch in epoch_stim.ch_names + epoch_meg.ch_names))
+    epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
+
+    assert_true(all(ch in epoch_new.ch_names
+                    for ch in epoch_eeg_meg.ch_names))
+    assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
+    assert_true(all(ch not in epoch_new.ch_names
+                    for ch in epoch_stim.ch_names))
+
+    # Now test errors
+    epoch_badsf = epoch_eeg.copy()
+    epoch_badsf.info['sfreq'] = 3.1415927
+    epoch_eeg = epoch_eeg.crop(-.1, .1)
+
+    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
+    assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
+    assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
+    assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
+    assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py
index 31d2ed1..2c5dd99 100644
--- a/mne/tests/test_event.py
+++ b/mne/tests/test_event.py
@@ -1,15 +1,14 @@
 import os.path as op
 import os
 
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_raises
 import numpy as np
-from numpy.testing import (assert_array_almost_equal, assert_array_equal,
-                           assert_raises)
+from numpy.testing import assert_array_almost_equal, assert_array_equal
 import warnings
 
 from mne import (read_events, write_events, make_fixed_length_events,
-                 find_events, find_stim_steps, io, pick_channels)
-from mne.utils import _TempDir
+                 find_events, pick_events, find_stim_steps, io, pick_channels)
+from mne.utils import _TempDir, run_tests_if_main
 from mne.event import define_target_events, merge_events
 
 warnings.simplefilter('always')
@@ -26,8 +25,6 @@ fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
 fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 
-tempdir = _TempDir()
-
 
 def test_add_events():
     """Test adding events to a Raw file"""
@@ -73,6 +70,7 @@ def test_merge_events():
 def test_io_events():
     """Test IO for events
     """
+    tempdir = _TempDir()
     # Test binary fif IO
     events = read_events(fname)  # Use as the gold standard
     write_events(op.join(tempdir, 'events-eve.fif'), events)
@@ -103,8 +101,10 @@ def test_io_events():
     # Test event selection
     a = read_events(op.join(tempdir, 'events-eve.fif'), include=1)
     b = read_events(op.join(tempdir, 'events-eve.fif'), include=[1])
-    c = read_events(op.join(tempdir, 'events-eve.fif'), exclude=[2, 3, 4, 5, 32])
-    d = read_events(op.join(tempdir, 'events-eve.fif'), include=1, exclude=[2, 3])
+    c = read_events(op.join(tempdir, 'events-eve.fif'),
+                    exclude=[2, 3, 4, 5, 32])
+    d = read_events(op.join(tempdir, 'events-eve.fif'), include=1,
+                    exclude=[2, 3])
     assert_array_equal(a, b)
     assert_array_equal(a, c)
     assert_array_equal(a, d)
@@ -142,14 +142,34 @@ def test_find_events():
         del os.environ['MNE_STIM_CHANNEL_1']
     events2 = find_events(raw)
     assert_array_almost_equal(events, events2)
+    # now test with mask
+    events11 = find_events(raw, mask=3)
+    events22 = read_events(fname, mask=3)
+    assert_array_equal(events11, events22)
 
     # Reset some data for ease of comparison
-    raw.first_samp = 0
+    raw._first_samps[0] = 0
     raw.info['sfreq'] = 1000
+    raw._update_times()
 
     stim_channel = 'STI 014'
     stim_channel_idx = pick_channels(raw.info['ch_names'],
-                                      include=stim_channel)
+                                     include=[stim_channel])
+
+    # test digital masking
+    raw._data[stim_channel_idx, :5] = np.arange(5)
+    raw._data[stim_channel_idx, 5:] = 0
+    # 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
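+    # the mask clears the given bits before transitions are detected, e.g.
+    # mask=1 maps [0, 1, 2, 3, 4] -> [0, 0, 2, 2, 4], so onsets fall at
+    # samples 2 and 4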
+
+    assert_raises(TypeError, find_events, raw, mask="0")
+    assert_array_equal(find_events(raw, shortest_event=1, mask=1),
+                       [[2,    0,    2], [4,    2,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=2),
+                       [[1,    0,    1], [3,    0,    1], [4,    1,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=3),
+                       [[4,    0,    4]])
+    assert_array_equal(find_events(raw, shortest_event=1, mask=4),
+                       [[1,    0,    1], [2,    1,    2], [3,    2,    3]])
 
     # test empty events channel
     raw._data[stim_channel_idx, :] = 0
@@ -192,7 +212,8 @@ def test_find_events():
                         [31, 0, 5],
                         [40, 0, 6],
                         [14399, 0, 9]])
-    assert_raises(ValueError,find_events,raw, output='step', consecutive=True)
+    assert_raises(ValueError, find_events, raw, output='step',
+                  consecutive=True)
     assert_array_equal(find_events(raw, output='step', consecutive=True,
                                    shortest_event=1),
                        [[10, 0, 5],
@@ -255,6 +276,27 @@ def test_find_events():
             os.environ['MNE_STIM_CHANNEL%s' % s] = o
 
 
+def test_pick_events():
+    """Test pick events in a events ndarray
+    """
+    events = np.array([[1, 0, 1],
+                       [2, 1, 0],
+                       [3, 0, 4],
+                       [4, 4, 2],
+                       [5, 2, 0]])
+    assert_array_equal(pick_events(events, include=[1, 4], exclude=4),
+                       [[1, 0, 1],
+                        [3, 0, 4]])
+    assert_array_equal(pick_events(events, exclude=[0, 2]),
+                       [[1, 0, 1],
+                        [3, 0, 4]])
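+    # with step=True a row is kept if the value before *or* after the
+    # transition matches the include list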
+    assert_array_equal(pick_events(events, include=[1, 2], step=True),
+                       [[1, 0, 1],
+                        [2, 1, 0],
+                        [4, 4, 2],
+                        [5, 2, 0]])
+
+
 def test_make_fixed_length_events():
     """Test making events of a fixed length
     """
@@ -275,3 +317,23 @@ def test_define_events():
     n_target_ = events_[events_[:, 2] == 42].shape[0]
 
     assert_true(n_target_ == (n_target - n_miss))
+
+    events = np.array([[0, 0, 1],
+                       [375, 0, 2],
+                       [500, 0, 1],
+                       [875, 0, 3],
+                       [1000, 0, 1],
+                       [1375, 0, 3],
+                       [1100, 0, 1],
+                       [1475, 0, 2],
+                       [1500, 0, 1],
+                       [1875, 0, 2]])
+    true_lag_nofill = [1500., 1500., 1500.]
+    true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]
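+    # without a fill value, unmatched reference events are dropped (3 lags);
+    # with fill_na=99 they are kept and their lag is reported as NaN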
+    n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
+    n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)
+
+    assert_array_equal(true_lag_fill, lag_fill)
+    assert_array_equal(true_lag_nofill, lag_nofill)
+
+run_tests_if_main()
diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py
index fa8bd90..7918378 100644
--- a/mne/tests/test_evoked.py
+++ b/mne/tests/test_evoked.py
@@ -1,6 +1,7 @@
 # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
 #         Denis Engemann <denis.engemann@gmail.com>
 #         Andrew Dykstra <andrew.r.dykstra@gmail.com>
+#         Mads Jensen <mje.mads@gmail.com>
 #
 # License: BSD (3-clause)
 
@@ -9,16 +10,17 @@ from copy import deepcopy
 import warnings
 
 import numpy as np
+from scipy import fftpack
 from numpy.testing import (assert_array_almost_equal, assert_equal,
                            assert_array_equal, assert_allclose)
 from nose.tools import assert_true, assert_raises, assert_not_equal
 
-from mne import (equalize_channels, pick_types, read_evoked, write_evoked,
-                 read_evokeds, write_evokeds)
+from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
+                 grand_average, combine_evoked)
 from mne.evoked import _get_peak, EvokedArray
 from mne.epochs import EpochsArray
 
-from mne.utils import _TempDir, requires_pandas, requires_nitime
+from mne.utils import _TempDir, requires_pandas, slow_test, requires_version
 
 from mne.io.meas_info import create_info
 from mne.externals.six.moves import cPickle as pickle
@@ -30,7 +32,27 @@ fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
 fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
                    'test-ave.fif.gz')
 
-tempdir = _TempDir()
+
+@requires_version('scipy', '0.14')
+def test_savgol_filter():
+    """Test savgol filtering
+    """
+    h_freq = 10.
+    evoked = read_evokeds(fname, 0)
+    freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
+    data = np.abs(fftpack.fft(evoked.data))
+    match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
+    mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
+    assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
+    evoked.savgol_filter(h_freq)
+    data_filt = np.abs(fftpack.fft(evoked.data))
+    # decent in pass-band
+    assert_allclose(np.mean(data[:, match_mask], 0),
+                    np.mean(data_filt[:, match_mask], 0),
+                    rtol=1e-4, atol=1e-2)
+    # suppression in stop-band
+    assert_true(np.mean(data[:, mismatch_mask]) >
+                np.mean(data_filt[:, mismatch_mask]) * 5)
 
 
 def test_hash_evoked():
@@ -46,9 +68,11 @@ def test_hash_evoked():
     assert_not_equal(hash(ave), hash(ave_2))
 
 
+@slow_test
 def test_io_evoked():
     """Test IO for evoked data (fif + gz) with integer and str args
     """
+    tempdir = _TempDir()
     ave = read_evokeds(fname, 0)
 
     write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
@@ -62,6 +86,7 @@ def test_io_evoked():
     assert_equal(ave.kind, ave2.kind)
     assert_equal(ave.last, ave2.last)
     assert_equal(ave.first, ave2.first)
+    assert_true(repr(ave))
 
     # test compressed i/o
     ave2 = read_evokeds(fname_gz, 0)
@@ -75,17 +100,6 @@ def test_io_evoked():
     ave3 = read_evokeds(fname, condition)
     assert_array_almost_equal(ave.data, ave3.data, 19)
 
-    # test deprecation warning for read_evoked and write_evoked
-    # XXX should be deleted for 0.9 release
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        ave = read_evoked(fname, setno=0)
-        assert_true(w[0].category == DeprecationWarning)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        write_evoked(op.join(tempdir, 'evoked-ave.fif'), ave)
-        assert_true(w[0].category == DeprecationWarning)
-
     # test read_evokeds and write_evokeds
     types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
     aves1 = read_evokeds(fname)
@@ -116,6 +130,7 @@ def test_io_evoked():
 def test_shift_time_evoked():
     """ Test for shifting of time scale
     """
+    tempdir = _TempDir()
     # Shift backward
     ave = read_evokeds(fname, 0)
     ave.shift_time(-0.1, relative=True)
@@ -156,6 +171,7 @@ def test_shift_time_evoked():
 def test_evoked_resample():
     """Test for resampling of evoked data
     """
+    tempdir = _TempDir()
     # upsample, write it out, read it in
     ave = read_evokeds(fname, 0)
     sfreq_normal = ave.info['sfreq']
@@ -196,27 +212,14 @@ def test_evoked_detrend():
                             rtol=1e-8, atol=1e-16))
 
 
-@requires_nitime
-def test_evoked_to_nitime():
-    """ Test to_nitime """
-    ave = read_evokeds(fname, 0)
-    evoked_ts = ave.to_nitime()
-    assert_equal(evoked_ts.data, ave.data)
-
-    picks2 = [1, 2]
-    ave = read_evokeds(fname, 0)
-    evoked_ts = ave.to_nitime(picks=picks2)
-    assert_equal(evoked_ts.data, ave.data[picks2])
-
-
 @requires_pandas
-def test_as_data_frame():
+def test_to_data_frame():
     """Test evoked Pandas exporter"""
     ave = read_evokeds(fname, 0)
-    assert_raises(ValueError, ave.as_data_frame, picks=np.arange(400))
-    df = ave.as_data_frame()
+    assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
+    df = ave.to_data_frame()
     assert_true((df.columns == ave.ch_names).all())
-    df = ave.as_data_frame(use_time_index=False)
+    df = ave.to_data_frame(index=None).reset_index('time')
     assert_true('time' in df.columns)
     assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
     assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
@@ -326,6 +329,14 @@ def test_pick_channels_mixin():
     assert_equal(ch_names, evoked.ch_names)
     assert_equal(len(ch_names), len(evoked.data))
 
+    evoked = read_evokeds(fname, condition=0, proj=True)
+    assert_true('meg' in evoked)
+    assert_true('eeg' in evoked)
+    evoked.pick_types(meg=False, eeg=True)
+    assert_true('meg' not in evoked)
+    assert_true('eeg' in evoked)
+    assert_true(len(evoked.ch_names) == 60)
+
 
 def test_equalize_channels():
     """Test equalization of channels
@@ -341,9 +352,67 @@ def test_equalize_channels():
         assert_equal(ch_names, e.ch_names)
 
 
+def test_evoked_arithmetic():
+    """Test evoked arithmetic
+    """
+    ev = read_evokeds(fname, condition=0)
+    ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
+    ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
+
+    # combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
+    # data should be added according to their `nave` weights
+    # nave = ev1.nave + ev2.nave
+    ev = ev1 + ev2
+    assert_equal(ev.nave, ev1.nave + ev2.nave)
+    assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
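+    # (sanity check: the nave-weighted mean of +1 data (nave=20) and -1 data
+    #  (nave=10) is (20 * 1 + 10 * -1) / (20 + 10) = 1 / 3)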
+    ev = ev1 - ev2
+    assert_equal(ev.nave, ev1.nave + ev2.nave)
+    assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
+    assert_allclose(ev.data, np.ones_like(ev1.data))
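+    # (subtraction is weighted the same way: (20 * 1 - 10 * -1) / 30 = 1)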
+
+    # default comment behavior if evoked.comment is None
+    old_comment1 = ev1.comment
+    old_comment2 = ev2.comment
+    ev1.comment = None
+    with warnings.catch_warnings(record=True):
+        warnings.simplefilter('always')
+        ev = ev1 - ev2
+        assert_equal(ev.comment, 'unknown')
+    ev1.comment = old_comment1
+    ev2.comment = old_comment2
+
+    # equal weighting
+    ev = combine_evoked([ev1, ev2], weights='equal')
+    assert_allclose(ev.data, np.zeros_like(ev1.data))
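+    # (with equal weights the +1 and -1 data cancel exactly to zero)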
+
+    # combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
+    ev = combine_evoked([ev1, ev2], weights=[1, 0])
+    assert_equal(ev.nave, ev1.nave)
+    assert_allclose(ev.data, ev1.data)
+
+    # simple subtraction (like in oddball)
+    ev = combine_evoked([ev1, ev2], weights=[1, -1])
+    assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
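+    # (explicit weights bypass the nave weighting: 1 * 1 + (-1) * (-1) = 2)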
+
+    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
+    assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
+
+    # grand average
+    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
+    ch_names = evoked1.ch_names[2:]
+    evoked1.info['bads'] = ['EEG 008']  # test interpolation
+    evoked1.drop_channels(evoked1.ch_names[:1])
+    evoked2.drop_channels(evoked2.ch_names[1:2])
+    gave = grand_average([evoked1, evoked2])
+    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
+    assert_equal(ch_names, gave.ch_names)
+    assert_equal(gave.nave, 2)
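+    # (grand_average weights each evoked equally, so nave becomes the number
+    #  of inputs, here 2, rather than the sum of the individual nave values)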
+
+
 def test_array_epochs():
     """Test creating evoked from array
     """
+    tempdir = _TempDir()
 
     # creating
     rng = np.random.RandomState(42)
@@ -382,3 +451,33 @@ def test_array_epochs():
     types = ['eeg'] * 19
     info = create_info(ch_names, sfreq, types)
     assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
+
+
+def test_add_channels():
+    """Test evoked splitting / re-appending channel types
+    """
+    evoked = read_evokeds(fname, condition=0)
+    evoked.info['buffer_size_sec'] = None
+    evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
+    evoked_meg = evoked.pick_types(meg=True, copy=True)
+    evoked_stim = evoked.pick_types(meg=False, stim=True, copy=True)
+    evoked_eeg_meg = evoked.pick_types(meg=True, eeg=True, copy=True)
+    evoked_new = evoked_meg.add_channels([evoked_eeg, evoked_stim], copy=True)
+    assert_true(all(ch in evoked_new.ch_names
+                    for ch in evoked_stim.ch_names + evoked_meg.ch_names))
+    evoked_new = evoked_meg.add_channels([evoked_eeg], copy=True)
+
+    assert_true(all(ch in evoked_new.ch_names
+                    for ch in evoked.ch_names))
+    assert_array_equal(evoked_new.data, evoked_eeg_meg.data)
+    assert_true(all(ch not in evoked_new.ch_names
+                    for ch in evoked_stim.ch_names))
+
+    # Now test errors
+    evoked_badsf = evoked_eeg.copy()
+    evoked_badsf.info['sfreq'] = 3.1415927
+    evoked_eeg = evoked_eeg.crop(-.1, .1)
+
+    assert_raises(RuntimeError, evoked_meg.add_channels, [evoked_badsf])
+    assert_raises(AssertionError, evoked_meg.add_channels, [evoked_eeg])
+    assert_raises(ValueError, evoked_meg.add_channels, [evoked_meg])
+    assert_raises(AssertionError, evoked_meg.add_channels, evoked_badsf)
diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py
index 2823f94..d09d39b 100644
--- a/mne/tests/test_filter.py
+++ b/mne/tests/test_filter.py
@@ -1,66 +1,122 @@
 import numpy as np
 from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
-                           assert_array_equal)
+                           assert_array_equal, assert_allclose)
 from nose.tools import assert_equal, assert_true, assert_raises
 import os.path as op
 import warnings
 from scipy.signal import resample as sp_resample
 
 from mne.filter import (band_pass_filter, high_pass_filter, low_pass_filter,
-                        band_stop_filter, resample, construct_iir_filter,
-                        notch_filter, detrend)
+                        band_stop_filter, resample, _resample_stim_channels,
+                        construct_iir_filter, notch_filter, detrend,
+                        _overlap_add_filter, _smart_pad)
 
 from mne import set_log_file
-from mne.utils import _TempDir, sum_squared
-from mne.cuda import requires_cuda
+from mne.utils import _TempDir, sum_squared, run_tests_if_main, slow_test
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-tempdir = _TempDir()
-log_file = op.join(tempdir, 'temp_log.txt')
+
+def test_1d_filter():
+    """Test our private overlap-add filtering function"""
+    rng = np.random.RandomState(0)
+    # make some random signals and filters
+    for n_signal in (1, 2, 5, 10, 20, 40, 100, 200, 400, 1000, 2000):
+        x = rng.randn(n_signal)
+        for n_filter in (2, 5, 10, 20, 40, 100, 200, 400, 1000, 2000):
+            # Don't test n_filter == 1 because scipy can't handle it.
+            if n_filter > n_signal:
+                continue  # only equal or lesser lengths supported
+            for filter_type in ('identity', 'random'):
+                if filter_type == 'random':
+                    h = rng.randn(n_filter)
+                else:  # filter_type == 'identity'
+                    h = np.concatenate([[1.], np.zeros(n_filter - 1)])
+                # ensure we pad the signal the same way for both filters
+                n_pad = max(min(n_filter, n_signal - 1), 0)
+                x_pad = _smart_pad(x, n_pad)
+                for zero_phase in (True, False):
+                    # compute our expected result the slow way
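+                    # (zero-phase filtering applies h forward and backward,
+                    #  as filtfilt does: convolve, reverse, convolve,
+                    #  reverse; phase cancels, magnitude response squares)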
+                    if zero_phase:
+                        x_expected = np.convolve(x_pad, h)[::-1]
+                        x_expected = np.convolve(x_expected, h)[::-1]
+                        x_expected = x_expected[len(h) - 1:-(len(h) - 1)]
+                    else:
+                        x_expected = np.convolve(x_pad, h)
+                        x_expected = x_expected[:-(len(h) - 1)]
+                    # remove padding
+                    if n_pad > 0:
+                        x_expected = x_expected[n_pad:-n_pad]
+                    # make sure we actually set things up reasonably
+                    if filter_type == 'identity':
+                        assert_allclose(x_expected, x)
+                    # compute our version
+                    for n_fft in (None, 32, 128, 129, 1023, 1024, 1025, 2048):
+                        # need to use .copy() b/c signal gets modified inplace
+                        x_copy = x[np.newaxis, :].copy()
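+                        # (each FFT block must hold the effective kernel:
+                        #  zero-phase doubles its length, requiring
+                        #  n_fft >= 2 * n_filter - 1; a single pass only
+                        #  needs n_fft >= n_filter)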
+                        if (n_fft is not None and n_fft < 2 * n_filter - 1 and
+                                zero_phase):
+                            assert_raises(ValueError, _overlap_add_filter,
+                                          x_copy, h, n_fft, zero_phase)
+                        elif (n_fft is not None and n_fft < n_filter and not
+                                zero_phase):
+                            assert_raises(ValueError, _overlap_add_filter,
+                                          x_copy, h, n_fft, zero_phase)
+                        else:
+                            # bad len warning
+                            with warnings.catch_warnings(record=True):
+                                x_filtered = _overlap_add_filter(
+                                    x_copy, h, n_fft, zero_phase)[0]
+                            assert_allclose(x_expected, x_filtered)
 
 
 def test_iir_stability():
     """Test IIR filter stability check
     """
     sig = np.empty(1000)
-    fs = 1000
+    sfreq = 1000
     # This will make an unstable filter, should throw RuntimeError
-    assert_raises(RuntimeError, high_pass_filter, sig, fs, 0.6,
+    assert_raises(RuntimeError, high_pass_filter, sig, sfreq, 0.6,
                   method='iir', iir_params=dict(ftype='butter', order=8))
     # can't pass iir_params if method='fir'
-    assert_raises(ValueError, high_pass_filter, sig, fs, 0.1,
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
                   method='fir', iir_params=dict(ftype='butter', order=2))
     # method must be string
-    assert_raises(TypeError, high_pass_filter, sig, fs, 0.1,
+    assert_raises(TypeError, high_pass_filter, sig, sfreq, 0.1,
                   method=1)
     # unknown method
-    assert_raises(ValueError, high_pass_filter, sig, fs, 0.1,
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
                   method='blah')
     # bad iir_params
-    assert_raises(ValueError, high_pass_filter, sig, fs, 0.1,
+    assert_raises(ValueError, high_pass_filter, sig, sfreq, 0.1,
                   method='fir', iir_params='blah')
 
+    # should pass because the default trans_bandwidth is not relevant
+    high_pass_filter(sig, 250, 0.5, method='iir',
+                     iir_params=dict(ftype='butter', order=6))
+
 
 def test_notch_filters():
     """Test notch filters
     """
-    # let's use an ugly, prime Fs for fun
-    Fs = 487.0
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    # let's use an ugly, prime sfreq for fun
+    sfreq = 487.0
     sig_len_secs = 20
-    t = np.arange(0, int(sig_len_secs * Fs)) / Fs
+    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
     freqs = np.arange(60, 241, 60)
 
     # make a "signal"
     rng = np.random.RandomState(0)
-    a = rng.randn(int(sig_len_secs * Fs))
+    a = rng.randn(int(sig_len_secs * sfreq))
     orig_power = np.sqrt(np.mean(a ** 2))
     # make line noise
     a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)
 
     # only allow None line_freqs with 'spectrum_fit' mode
-    assert_raises(ValueError, notch_filter, a, Fs, None, 'fft')
-    assert_raises(ValueError, notch_filter, a, Fs, None, 'iir')
+    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
+    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
     methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
     filter_lengths = [None, None, None, 8192, None]
     line_freqs = [None, freqs, freqs, freqs, freqs]
@@ -69,7 +125,7 @@ def test_notch_filters():
         if lf is None:
             set_log_file(log_file, overwrite=True)
 
-        b = notch_filter(a, Fs, lf, filter_length=fl, method=meth,
+        b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth,
                          verbose='INFO')
 
         if lf is None:
@@ -99,48 +155,85 @@ def test_resample():
     x_3_rs = resample(x_3, 1, 2, 10, 0)
     assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
 
+    # make sure we cast to array if necessary
+    assert_array_equal(resample([0, 0], 2, 1), [0., 0., 0., 0.])
+
+
+def test_resample_stim_channel():
+    """Test resampling of stim channels"""
+
+    # Downsampling
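+    # (stim channels are not interpolated like data channels: each output
+    #  sample keeps a nonzero trigger value from the input span it covers,
+    #  so events are not lost when downsampling)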
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 2),
+        [[1, 0, 2, 0]])
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 1.5),
+        [[1, 0, 0, 2, 0]])
+    assert_array_equal(
+        _resample_stim_channels([1, 0, 0, 1, 2, 0, 0, 1], 1, 2),
+        [[1, 1, 2, 1]])
+
+    # Upsampling
+    assert_array_equal(
+        _resample_stim_channels([1, 2, 3], 2, 1), [[1, 1, 2, 2, 3, 3]])
+    assert_array_equal(
+        _resample_stim_channels([1, 2, 3], 2.5, 1), [[1, 1, 1, 2, 2, 3, 3, 3]])
+
+    # Proper number of samples in stim channel resampling from io/base.py
+    data_chunk = np.zeros((1, 315600))
+    for new_data_len in (52598, 52599, 52600, 52601, 315599, 315600):
+        new_data = _resample_stim_channels(data_chunk, new_data_len,
+                                           data_chunk.shape[1])
+        assert_equal(new_data.shape[1], new_data_len)
 
+
+@slow_test
 def test_filters():
     """Test low-, band-, high-pass, and band-stop filters plus resampling
     """
-    Fs = 500
+    sfreq = 500
     sig_len_secs = 30
 
-    a = np.random.randn(2, sig_len_secs * Fs)
+    a = np.random.randn(2, sig_len_secs * sfreq)
 
     # let's test our catchers
     for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
-        assert_raises(ValueError, band_pass_filter, a, Fs, 4, 8,
+        assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8,
                       filter_length=fl)
-    for nj in ['blah', 0.5, 0]:
-        assert_raises(ValueError, band_pass_filter, a, Fs, 4, 8, n_jobs=nj)
-    assert_raises(ValueError, band_pass_filter, a, Fs, 4, Fs / 2.)  # > Nyq/2
-    assert_raises(ValueError, low_pass_filter, a, Fs, Fs / 2.)  # > Nyq/2
+    for nj in ['blah', 0.5]:
+        assert_raises(ValueError, band_pass_filter, a, sfreq, 4, 8, n_jobs=nj)
+    # > Nyq/2
+    assert_raises(ValueError, band_pass_filter, a, sfreq, 4, sfreq / 2.)
+    assert_raises(ValueError, low_pass_filter, a, sfreq, sfreq / 2.)
     # check our short-filter warning:
     with warnings.catch_warnings(record=True) as w:
         # Warning for low attenuation
-        band_pass_filter(a, Fs, 1, 8, filter_length=1024)
+        band_pass_filter(a, sfreq, 1, 8, filter_length=1024)
         # Warning for too short a filter
-        band_pass_filter(a, Fs, 1, 8, filter_length='0.5s')
+        band_pass_filter(a, sfreq, 1, 8, filter_length='0.5s')
     assert_true(len(w) >= 2)
 
     # try new default and old default
     for fl in ['10s', '5000ms', None]:
-        bp = band_pass_filter(a, Fs, 4, 8, filter_length=fl)
-        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, filter_length=fl)
-        lp = low_pass_filter(a, Fs, 8, filter_length=fl, n_jobs=2)
-        hp = high_pass_filter(lp, Fs, 4, filter_length=fl)
+        bp = band_pass_filter(a, sfreq, 4, 8, filter_length=fl)
+        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, filter_length=fl)
+        lp = low_pass_filter(a, sfreq, 8, filter_length=fl, n_jobs=2)
+        hp = high_pass_filter(lp, sfreq, 4, filter_length=fl)
         assert_array_almost_equal(hp, bp, 2)
         assert_array_almost_equal(bp + bs, a, 1)
 
     # Overlap-add filtering with a fixed filter length
     filter_length = 8192
-    bp_oa = band_pass_filter(a, Fs, 4, 8, filter_length)
-    bs_oa = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, filter_length)
-    lp_oa = low_pass_filter(a, Fs, 8, filter_length)
-    hp_oa = high_pass_filter(lp_oa, Fs, 4, filter_length)
+    bp_oa = band_pass_filter(a, sfreq, 4, 8, filter_length)
+    bs_oa = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, filter_length)
+    lp_oa = low_pass_filter(a, sfreq, 8, filter_length)
+    hp_oa = high_pass_filter(lp_oa, sfreq, 4, filter_length)
     assert_array_almost_equal(hp_oa, bp_oa, 2)
-    assert_array_almost_equal(bp_oa + bs_oa, a, 2)
+    # Our filters are no longer quite complementary with linear rolloffs :(
+    # this is the tradeoff for stability of the filtering
+    # obtained by directly using the result of firwin2 instead of
+    # modifying it...
+    assert_array_almost_equal(bp_oa + bs_oa, a, 1)
 
     # The two methods should give the same result
     # As filtering for short signals uses a circular convolution (FFT) and
@@ -169,9 +262,9 @@ def test_filters():
                               bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
 
     # make sure we don't alias
-    t = np.array(list(range(Fs * sig_len_secs))) / float(Fs)
+    t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
     # make sinusoid close to the Nyquist frequency
-    sig = np.sin(2 * np.pi * Fs / 2.2 * t)
+    sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
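+    # (sfreq / 2.2 ~= 227 Hz is above the post-decimation Nyquist frequency
+    #  of sfreq / 4 = 125 Hz, so the anti-aliasing filter must remove it)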
     # signal should disappear with 2x downsampling
     sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
     assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
@@ -187,32 +280,63 @@ def test_filters():
     assert_true(iir_params['a'].size - 1 == 4)
     assert_true(iir_params['b'].size - 1 == 4)
 
+    # check that picks work for 3d array with one channel and picks=[0]
+    a = np.random.randn(5 * sfreq, 5 * sfreq)
+    b = a[:, None, :]
+
+    with warnings.catch_warnings(record=True) as w:
+        a_filt = band_pass_filter(a, sfreq, 4, 8)
+        b_filt = band_pass_filter(b, sfreq, 4, 8, picks=[0])
+
+    assert_array_equal(a_filt[:, None, :], b_filt)
+
+    # check for n-dimensional case
+    a = np.random.randn(2, 2, 2, 2)
+    assert_raises(ValueError, band_pass_filter, a, sfreq, Fp1=4, Fp2=8,
+                  picks=np.array([0, 1]))
+
+    # test that our overlap-add filtering doesn't introduce strange
+    # artifacts (from mne_analyze mailing list 2015/06/25)
+    N = 300
+    sfreq = 100.
+    lp = 10.
+    sine_freq = 1.
+    x = np.ones(N)
+    x += np.sin(2 * np.pi * sine_freq * np.arange(N) / sfreq)
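+    # (the DC offset and the 1 Hz sine both lie well inside the 10 Hz
+    #  passband, so the filtered output should closely match the input)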
+    with warnings.catch_warnings(record=True):  # filter attenuation
+        x_filt = low_pass_filter(x, sfreq, lp, '1s')
+    # the firwin2 function gets us this close
+    assert_allclose(x, x_filt, rtol=1e-3, atol=1e-3)
+
 
-@requires_cuda
 def test_cuda():
     """Test CUDA-based filtering
     """
     # NOTE: don't make test_cuda() the last test, or pycuda might spew
     # some warnings about clean-up failing
-    Fs = 500
+    # Also, using `n_jobs='cuda'` on a non-CUDA system should be fine,
+    # as it should fall back to using n_jobs=1.
+    tempdir = _TempDir()
+    log_file = op.join(tempdir, 'temp_log.txt')
+    sfreq = 500
     sig_len_secs = 20
-    a = np.random.randn(sig_len_secs * Fs)
+    a = np.random.randn(sig_len_secs * sfreq)
 
     set_log_file(log_file, overwrite=True)
     for fl in ['10s', None, 2048]:
-        bp = band_pass_filter(a, Fs, 4, 8, n_jobs=1, filter_length=fl)
-        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs=1,
+        bp = band_pass_filter(a, sfreq, 4, 8, n_jobs=1, filter_length=fl)
+        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs=1,
                               filter_length=fl)
-        lp = low_pass_filter(a, Fs, 8, n_jobs=1, filter_length=fl)
-        hp = high_pass_filter(lp, Fs, 4, n_jobs=1, filter_length=fl)
+        lp = low_pass_filter(a, sfreq, 8, n_jobs=1, filter_length=fl)
+        hp = high_pass_filter(lp, sfreq, 4, n_jobs=1, filter_length=fl)
 
-        bp_c = band_pass_filter(a, Fs, 4, 8, n_jobs='cuda', filter_length=fl,
-                                verbose='INFO')
-        bs_c = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
+        bp_c = band_pass_filter(a, sfreq, 4, 8, n_jobs='cuda',
+                                filter_length=fl, verbose='INFO')
+        bs_c = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
                                 filter_length=fl, verbose='INFO')
-        lp_c = low_pass_filter(a, Fs, 8, n_jobs='cuda', filter_length=fl,
+        lp_c = low_pass_filter(a, sfreq, 8, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')
-        hp_c = high_pass_filter(lp, Fs, 4, n_jobs='cuda', filter_length=fl,
+        hp_c = high_pass_filter(lp, sfreq, 4, n_jobs='cuda', filter_length=fl,
                                 verbose='INFO')
 
         assert_array_almost_equal(bp, bp_c, 12)
@@ -224,17 +348,23 @@ def test_cuda():
     set_log_file()
     with open(log_file) as fid:
         out = fid.readlines()
+    # triage based on whether or not we actually expected to use CUDA
+    from mne.cuda import _cuda_capable  # allow above funs to set it
+    tot = 12 if _cuda_capable else 0
     assert_true(sum(['Using CUDA for FFT FIR filtering' in o
-                     for o in out]) == 12)
+                     for o in out]) == tot)
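+    # (12 = 3 filter lengths x 4 CUDA filter calls per loop iteration)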
 
     # check resampling
-    a = np.random.RandomState(0).randn(3, sig_len_secs * Fs)
+    a = np.random.RandomState(0).randn(3, sig_len_secs * sfreq)
     a1 = resample(a, 1, 2, n_jobs=2, npad=0)
     a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
     a3 = resample(a, 2, 1, n_jobs=2, npad=0)
     a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
     assert_array_almost_equal(a3, a4, 14)
     assert_array_almost_equal(a1, a2, 14)
+    assert_array_equal(resample([0, 0], 2, 1, n_jobs='cuda'), [0., 0., 0., 0.])
+    assert_array_equal(resample(np.zeros(2, np.float32), 2, 1, n_jobs='cuda'),
+                       [0., 0., 0., 0.])
 
 
 def test_detrend():
@@ -244,3 +374,6 @@ def test_detrend():
     assert_array_almost_equal(detrend(x, 1), np.zeros_like(x))
     x = np.ones(10)
     assert_array_almost_equal(detrend(x, 0), np.zeros_like(x))
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_fixes.py b/mne/tests/test_fixes.py
index 96e85b4..eaa9fa3 100644
--- a/mne/tests/test_fixes.py
+++ b/mne/tests/test_fixes.py
@@ -5,15 +5,18 @@
 
 import numpy as np
 
-from nose.tools import assert_equal, assert_raises
+from nose.tools import assert_equal, assert_raises, assert_true
 from numpy.testing import assert_array_equal
 from distutils.version import LooseVersion
-from scipy import signal
+from scipy import signal, sparse
 
-from ..fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
-                     _Counter, _unique, _bincount, _digitize)
-from ..fixes import _firwin2 as mne_firwin2
-from ..fixes import _filtfilt as mne_filtfilt
+from mne.utils import run_tests_if_main
+from mne.fixes import (_in1d, _tril_indices, _copysign, _unravel_index,
+                       _Counter, _unique, _bincount, _digitize,
+                       _sparse_block_diag, _matrix_rank, _meshgrid,
+                       _isclose)
+from mne.fixes import _firwin2 as mne_firwin2
+from mne.fixes import _filtfilt as mne_filtfilt
 
 
 def test_counter():
@@ -21,13 +24,16 @@ def test_counter():
     import collections
     try:
         Counter = collections.Counter
-    except:
+    except Exception:
         pass
     else:
         a = Counter([1, 2, 1, 3])
         b = _Counter([1, 2, 1, 3])
+        c = _Counter()
+        c.update(b)
         for key, count in zip([1, 2, 3], [2, 1, 1]):
             assert_equal(a[key], b[key])
+            assert_equal(a[key], c[key])
 
 
 def test_unique():
@@ -140,3 +146,49 @@ def test_filtfilt():
     # Filter with an impulse
     y = mne_filtfilt([1, 0], [1, 0], x, padlen=0)
     assert_array_equal(x, y)
+
+
+def test_sparse_block_diag():
+    """Test sparse block diag replacement"""
+    x = _sparse_block_diag([sparse.eye(2, 2), sparse.eye(2, 2)])
+    x = x - sparse.eye(4, 4)
+    x.eliminate_zeros()
+    assert_equal(len(x.data), 0)
+
+
+def test_rank():
+    """Test rank replacement"""
+    assert_equal(_matrix_rank(np.ones(10)), 1)
+    assert_equal(_matrix_rank(np.eye(10)), 10)
+    assert_equal(_matrix_rank(np.ones((10, 10))), 1)
+    assert_raises(TypeError, _matrix_rank, np.ones((10, 10, 10)))
+
+
+def test_meshgrid():
+    """Test meshgrid replacement
+    """
+    a = np.arange(10)
+    b = np.linspace(0, 1, 5)
+    a_grid, b_grid = _meshgrid(a, b, indexing='ij')
+    for grid in (a_grid, b_grid):
+        assert_equal(grid.shape, (a.size, b.size))
+    a_grid, b_grid = _meshgrid(a, b, indexing='xy', copy=True)
+    for grid in (a_grid, b_grid):
+        assert_equal(grid.shape, (b.size, a.size))
+    assert_raises(TypeError, _meshgrid, a, b, foo='a')
+    assert_raises(ValueError, _meshgrid, a, b, indexing='foo')
+
+
+def test_isclose():
+    """Test isclose replacement
+    """
+    a = np.random.RandomState(0).randn(10)
+    b = a.copy()
+    assert_true(_isclose(a, b).all())
+    a[0] = np.inf
+    b[0] = np.inf
+    a[-1] = np.nan
+    b[-1] = np.nan
+    assert_true(_isclose(a, b, equal_nan=True).all())
+
+run_tests_if_main()
diff --git a/mne/tests/test_hdf5.py b/mne/tests/test_hdf5.py
deleted file mode 100644
index 893dc47..0000000
--- a/mne/tests/test_hdf5.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-from os import path as op
-from nose.tools import assert_raises, assert_true, assert_equal
-
-import numpy as np
-
-from mne._hdf5 import write_hdf5, read_hdf5
-from mne.utils import requires_pytables, _TempDir, object_diff
-
-tempdir = _TempDir()
-
-
-@requires_pytables()
-def test_hdf5():
-    """Test HDF5 IO
-    """
-    test_file = op.join(tempdir, 'test.hdf5')
-    x = dict(a=dict(b=np.zeros(3)), c=np.zeros(2, np.complex128),
-             d=[dict(e=(1, -2., 'hello', u'goodbyeu\u2764')), None])
-    write_hdf5(test_file, 1)
-    assert_equal(read_hdf5(test_file), 1)
-    assert_raises(IOError, write_hdf5, test_file, x)  # file exists
-    write_hdf5(test_file, x, overwrite=True)
-    assert_raises(IOError, read_hdf5, test_file + 'FOO')  # not found
-    xx = read_hdf5(test_file)
-    assert_true(object_diff(x, xx) == '')  # no assert_equal, ugly output
diff --git a/mne/tests/test_import_nesting.py b/mne/tests/test_import_nesting.py
new file mode 100644
index 0000000..36d0a20
--- /dev/null
+++ b/mne/tests/test_import_nesting.py
@@ -0,0 +1,53 @@
+import sys
+from subprocess import Popen, PIPE
+
+from mne.utils import run_tests_if_main, requires_version
+
+
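+# The script below runs in a fresh subprocess, so sys.modules reflects only
+# what `import mne` itself pulls in; heavy optional dependencies (sklearn,
+# pandas, nose) must stay nested inside functions, not imported at the top.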
+run_script = """
+from __future__ import print_function
+
+import sys
+import mne
+
+out = []
+
+# check scipy
+ok_scipy_submodules = set(['scipy', 'numpy',  # these appear in old scipy
+                           'fftpack', 'lib', 'linalg',
+                           'misc', 'sparse', 'version'])
+scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys()
+                       if x.startswith('scipy.') and '__' not in x and
+                       not x.split('.')[1].startswith('_'))
+bad = scipy_submodules - ok_scipy_submodules
+if len(bad) > 0:
+    out.append('Found un-nested scipy submodules: %s' % list(bad))
+
+# check sklearn and others
+_sklearn = _pandas = _nose = False
+for x in sys.modules.keys():
+    if x.startswith('sklearn') and not _sklearn:
+        out.append('Found un-nested sklearn import')
+        _sklearn = True
+    if x.startswith('pandas') and not _pandas:
+        out.append('Found un-nested pandas import')
+        _pandas = True
+    if x.startswith('nose') and not _nose:
+        out.append('Found un-nested nose import')
+        _nose = True
+if len(out) > 0:
+    print('\\n' + '\\n'.join(out), end='')
+    exit(1)
+"""
+
+
+@requires_version('scipy', '0.11')  # old ones not organized properly
+def test_module_nesting():
+    """Test that module imports are necessary
+    """
+    proc = Popen([sys.executable, '-c', run_script], stdout=PIPE, stderr=PIPE)
+    stdout, stderr = proc.communicate()
+    if proc.returncode:
+        raise AssertionError(stdout)
+
+run_tests_if_main()
diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py
index be93f3c..99a5c74 100644
--- a/mne/tests/test_label.py
+++ b/mne/tests/test_label.py
@@ -1,27 +1,39 @@
 import os
 import os.path as op
-from ..externals.six.moves import cPickle as pickle
+import shutil
 import glob
 import warnings
+import sys
 
 import numpy as np
+from scipy import sparse
+
 from numpy.testing import assert_array_equal, assert_array_almost_equal
-from nose.tools import assert_equal, assert_true, assert_raises
+from nose.tools import assert_equal, assert_true, assert_false, assert_raises
 
-from mne.datasets import sample
-from mne import (label_time_courses, read_label, stc_to_label,
-                 read_source_estimate, read_source_spaces, grow_labels,
-                 read_labels_from_annot, write_labels_to_annot, split_label)
+from mne.datasets import testing
+from mne import (read_label, stc_to_label, read_source_estimate,
+                 read_source_spaces, grow_labels, read_labels_from_annot,
+                 write_labels_to_annot, split_label, spatial_tris_connectivity,
+                 read_surface)
 from mne.label import Label, _blend_colors
-from mne.utils import requires_mne, run_subprocess, _TempDir, requires_sklearn
+from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
+                       run_tests_if_main, slow_test)
 from mne.fixes import digitize, in1d, assert_is, assert_is_not
+from mne.label import _n_colors
+from mne.source_space import SourceSpaces
+from mne.source_estimate import mesh_edges
+from mne.externals.six import string_types
+from mne.externals.six.moves import cPickle as pickle
+
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
 src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
-stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
+stc_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-lh.stc')
 real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
                            'Aud-lh.label')
 real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
@@ -29,14 +41,14 @@ real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
 v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
 
 fwd_fname = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-eeg-oct-6p-fwd.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
 src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                         'fsaverage-ico-5-src.fif')
+label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
 
 test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
 label_fname = op.join(test_path, 'test-lh.label')
 label_rh_fname = op.join(test_path, 'test-rh.label')
-tempdir = _TempDir()
 
 # This code was used to generate the "fake" test labels:
 # for hemi in ['lh', 'rh']:
@@ -45,8 +57,105 @@ tempdir = _TempDir()
 #    label.save(op.join(test_path, 'test-%s.label' % hemi))
 
 
-def assert_labels_equal(l0, l1, decimal=5):
-    for attr in ['comment', 'hemi', 'subject', 'color']:
+# XXX : this was added for backward compat, to keep the old test_label_in_src
+def _stc_to_label(stc, src, smooth, subjects_dir=None):
+    """Compute a label from the non-zero sources in an stc object.
+
+    Parameters
+    ----------
+    stc : SourceEstimate
+        The source estimates.
+    src : SourceSpaces | str | None
+        The source space over which the source estimates are defined.
+        If it's a string it should be the subject name (e.g. fsaverage).
+        Can be None if stc.subject is not None.
+    smooth : int
+        Number of smoothing iterations.
+    subjects_dir : str | None
+        Path to SUBJECTS_DIR if it is not set in the environment.
+
+    Returns
+    -------
+    labels : list of Labels
+        The generated labels, one entry per hemisphere. If no Label is
+        available in a hemisphere, None is returned for that entry.
+    """
+    src = stc.subject if src is None else src
+
+    if isinstance(src, string_types):
+        subject = src
+    else:
+        subject = stc.subject
+
+    if isinstance(src, string_types):
+        subjects_dir = get_subjects_dir(subjects_dir)
+        surf_path_from = op.join(subjects_dir, src, 'surf')
+        rr_lh, tris_lh = read_surface(op.join(surf_path_from,
+                                      'lh.white'))
+        rr_rh, tris_rh = read_surface(op.join(surf_path_from,
+                                      'rh.white'))
+        rr = [rr_lh, rr_rh]
+        tris = [tris_lh, tris_rh]
+    else:
+        if not isinstance(src, SourceSpaces):
+            raise TypeError('src must be a string or a set of source spaces')
+        if len(src) != 2:
+            raise ValueError('source space should contain the 2 hemispheres')
+        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
+        tris = [src[0]['tris'], src[1]['tris']]
+
+    labels = []
+    cnt = 0
+    for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
+            zip(['lh', 'rh'], stc.vertices, tris, rr)):
+        this_data = stc.data[cnt:cnt + len(this_vertno)]
+        e = mesh_edges(this_tris)
+        e.data[e.data == 2] = 1
+        n_vertices = e.shape[0]
+        e = e + sparse.eye(n_vertices, n_vertices)
+
+        clusters = [this_vertno[np.any(this_data, axis=1)]]
+
+        cnt += len(this_vertno)
+
+        clusters = [c for c in clusters if len(c) > 0]
+
+        if len(clusters) == 0:
+            this_labels = None
+        else:
+            this_labels = []
+            colors = _n_colors(len(clusters))
+            for c, color in zip(clusters, colors):
+                idx_use = c
+                for k in range(smooth):
+                    e_use = e[:, idx_use]
+                    data1 = e_use * np.ones(len(idx_use))
+                    idx_use = np.where(data1)[0]
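+                    # (each smoothing pass dilates the vertex set by one
+                    #  ring of mesh neighbors: adjacency columns times a
+                    #  ones vector flag every vertex touching the set)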
+
+                label = Label(idx_use, this_rr[idx_use], None, hemi,
+                              'Label from stc', subject=subject,
+                              color=color)
+
+                this_labels.append(label)
+
+            this_labels = this_labels[0]
+
+        labels.append(this_labels)
+
+    return labels
+
+
+def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
+    if comment:
+        assert_equal(l0.comment, l1.comment)
+    if color:
+        assert_equal(l0.color, l1.color)
+
+    for attr in ['hemi', 'subject']:
         attr0 = getattr(l0, attr)
         attr1 = getattr(l1, attr)
         msg = "label.%s: %r != %r" % (attr, attr0, attr1)
@@ -82,11 +191,23 @@ def test_label_addition():
 
     assert_equal(len(l0), len(idx0))
 
+    l_good = l0.copy()
+    l_good.subject = 'sample'
+    l_bad = l1.copy()
+    l_bad.subject = 'foo'
+    assert_raises(ValueError, l_good.__add__, l_bad)
+    assert_raises(TypeError, l_good.__add__, 'foo')
+    assert_raises(ValueError, l_good.__sub__, l_bad)
+    assert_raises(TypeError, l_good.__sub__, 'foo')
+
     # adding non-overlapping labels
     l01 = l0 + l1
     assert_equal(len(l01), len(l0) + len(l1))
     assert_array_equal(l01.values[:len(l0)], l0.values)
     assert_equal(l01.color, l0.color)
+    # subtraction
+    assert_labels_equal(l01 - l0, l1, comment=False, color=False)
+    assert_labels_equal(l01 - l1, l0, comment=False, color=False)
 
     # adding overlapping labels
     l = l0 + l2
@@ -105,13 +226,29 @@ def test_label_addition():
     assert_equal(bhl.hemi, 'both')
     assert_equal(len(bhl), len(l0) + len(l2))
     assert_equal(bhl.color, l.color)
+    assert_true('BiHemiLabel' in repr(bhl))
+    # subtraction
+    assert_labels_equal(bhl - l0, l2)
+    assert_labels_equal(bhl - l2, l0)
 
     bhl2 = l1 + bhl
     assert_labels_equal(bhl2.lh, l01)
     assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
+    assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices)  # rh label
+    assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
+    assert_raises(TypeError, bhl.__add__, 5)
+
+    # subtraction
+    bhl_ = bhl2 - l1
+    assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
+    assert_labels_equal(bhl_.rh, bhl.rh)
+    assert_labels_equal(bhl2 - l2, l0 + l1)
+    assert_labels_equal(bhl2 - l1 - l0, l2)
+    bhl_ = bhl2 - bhl2
+    assert_array_equal(bhl_.vertices, [])
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_label_in_src():
     """Test label in src"""
     src = read_source_spaces(src_fname)
@@ -141,18 +278,23 @@ def test_label_in_src():
     assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_label_io_and_time_course_estimates():
     """Test IO for label + stc files
     """
-    values, times, vertices = label_time_courses(real_label_fname, stc_fname)
-    assert_true(len(times) == values.shape[1])
-    assert_true(len(vertices) == values.shape[0])
+    stc = read_source_estimate(stc_fname)
+    label = read_label(real_label_fname)
+    stc_label = stc.in_label(label)
 
+    assert_true(len(stc_label.times) == stc_label.data.shape[1])
+    assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
 
+
+@testing.requires_testing_data
 def test_label_io():
     """Test IO of label files
     """
+    tempdir = _TempDir()
     label = read_label(label_fname)
 
     # label attributes
@@ -184,7 +326,54 @@ def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
             assert_array_equal(label_a.pos, label_b.pos)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
+def test_annot_io():
+    """Test I/O from and to *.annot files"""
+    # copy necessary files from fsaverage to tempdir
+    tempdir = _TempDir()
+    subject = 'fsaverage'
+    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
+    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
+    label_dir = os.path.join(tempdir, subject, 'label')
+    surf_dir = os.path.join(tempdir, subject, 'surf')
+    os.makedirs(label_dir)
+    os.mkdir(surf_dir)
+    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
+    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
+    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
+    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
+
+    # read original labels
+    assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
+                  subjects_dir=tempdir)
+    labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
+                                    subjects_dir=tempdir)
+
+    # test saving parcellation only covering one hemisphere
+    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
+    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
+    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
+    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
+    assert_equal(len(parc1), len(parc))
+    for l1, l in zip(parc1, parc):
+        assert_labels_equal(l1, l)
+
+    # test saving only one hemisphere
+    parc = [l for l in labels if l.name.startswith('LOBE')]
+    write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
+                          subjects_dir=tempdir)
+    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
+    assert_true(os.path.isfile(annot_fname % 'l'))
+    assert_false(os.path.isfile(annot_fname % 'r'))
+    parc1 = read_labels_from_annot(subject, 'myparc2',
+                                   annot_fname=annot_fname % 'l',
+                                   subjects_dir=tempdir)
+    parc_lh = [l for l in parc if l.name.endswith('lh')]
+    for l1, l in zip(parc1, parc_lh):
+        assert_labels_equal(l1, l)
+
+
+@testing.requires_testing_data
 def test_read_labels_from_annot():
     """Test reading labels from FreeSurfer parcellation
     """
@@ -200,7 +389,9 @@ def test_read_labels_from_annot():
     for label in labels_lh:
         assert_true(label.name.endswith('-lh'))
         assert_true(label.hemi == 'lh')
-        assert_is_not(label.color, None)
+        # XXX fails on 2.6 for some reason...
+        if sys.version_info[:2] > (2, 6):
+            assert_is_not(label.color, None)
 
     # read labels using annot_fname
     annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
@@ -242,47 +433,59 @@ def test_read_labels_from_annot():
                   subjects_dir=subjects_dir)
 
 
-@sample.requires_sample_data
-@requires_mne
+@testing.requires_testing_data
 def test_read_labels_from_annot_annot2labels():
     """Test reading labels from parc. by comparing with mne_annot2labels
     """
-
-    def _mne_annot2labels(subject, subjects_dir, parc):
-        """Get labels using mne_annot2lables"""
-        label_dir = _TempDir()
-        cwd = os.getcwd()
-        try:
-            os.chdir(label_dir)
-            env = os.environ.copy()
-            env['SUBJECTS_DIR'] = subjects_dir
-            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
-            run_subprocess(cmd, env=env)
-            label_fnames = glob.glob(label_dir + '/*.label')
-            label_fnames.sort()
-            labels = [read_label(fname) for fname in label_fnames]
-        finally:
-            del label_dir
-            os.chdir(cwd)
-
-        return labels
-
+    label_fnames = glob.glob(label_dir + '/*.label')
+    label_fnames.sort()
+    labels_mne = [read_label(fname) for fname in label_fnames]
     labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
-    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')
 
     # we have the same result, mne does not fill pos, so ignore it
     _assert_labels_equal(labels, labels_mne, ignore_pos=True)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_write_labels_to_annot():
     """Test writing FreeSurfer parcellation from labels"""
+    tempdir = _TempDir()
 
     labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
 
-    # write left and right hemi labels:
+    # create temporary subjects-dir skeleton
+    surf_dir = op.join(subjects_dir, 'sample', 'surf')
+    temp_surf_dir = op.join(tempdir, 'sample', 'surf')
+    os.makedirs(temp_surf_dir)
+    shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
+    shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
+    os.makedirs(op.join(tempdir, 'sample', 'label'))
+
+    # test automatic filenames
+    dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
+    write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test1')))
+    assert_true(op.exists(dst % ('rh', 'test1')))
+    # lh only
+    for label in labels:
+        if label.hemi == 'lh':
+            break
+    write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test2')))
+    assert_true(op.exists(dst % ('rh', 'test2')))
+    # rh only
+    for label in labels:
+        if label.hemi == 'rh':
+            break
+    write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
+    assert_true(op.exists(dst % ('lh', 'test3')))
+    assert_true(op.exists(dst % ('rh', 'test3')))
+    # label alone
+    assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
+                  'test4', subjects_dir=tempdir)
+
+    # write left and right hemi labels with filenames:
     fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]
-
     for fname in fnames:
         write_labels_to_annot(labels, annot_fname=fname)
 
@@ -355,8 +558,14 @@ def test_write_labels_to_annot():
     assert_equal(label1.name, "unknown-lh")
     assert_true(np.all(in1d(label0.vertices, label1.vertices)))
 
+    # unnamed labels
+    labels4 = labels[:]
+    labels4[0].name = None
+    assert_raises(ValueError, write_labels_to_annot, labels4,
+                  annot_fname=fnames[0])
+
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_split_label():
     """Test splitting labels"""
     aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
@@ -396,7 +605,8 @@ def test_split_label():
     assert_equal(antmost.name, "lingual_div40-lh")
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 @requires_sklearn
 def test_stc_to_label():
     """Test stc_to_label
@@ -407,11 +617,8 @@ def test_stc_to_label():
     src_bad = read_source_spaces(src_bad_fname)
     stc = read_source_estimate(stc_fname, 'sample')
     os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
-    with warnings.catch_warnings(record=True) as w:  # connectedness warning
-        warnings.simplefilter('always')
-        labels1 = stc_to_label(stc, src='sample', smooth=3)
-        labels2 = stc_to_label(stc, src=src, smooth=3)
-    assert_true(len(w) > 0)
+    labels1 = _stc_to_label(stc, src='sample', smooth=3)
+    labels2 = _stc_to_label(stc, src=src, smooth=3)
     assert_equal(len(labels1), len(labels2))
     for l1, l2 in zip(labels1, labels2):
         assert_labels_equal(l1, l2, decimal=4)
@@ -420,6 +627,7 @@ def test_stc_to_label():
         warnings.simplefilter('always')
         labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                             connected=True)
+
     assert_true(len(w) > 0)
     assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                   connected=True)
@@ -428,6 +636,28 @@ def test_stc_to_label():
     assert_equal(len(labels_lh), 1)
     assert_equal(len(labels_rh), 1)
 
+    # test getting tris
+    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
+    assert_raises(ValueError, spatial_tris_connectivity, tris,
+                  remap_vertices=False)
+    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
+    assert_true(connectivity.shape[0] == len(stc.vertices[0]))
+
+    # "src" as a subject name
+    assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
+                  connected=False, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
+                  smooth=False, connected=False, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
+                  connected=True, subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
+                  connected=False, subjects_dir=subjects_dir)
+    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
+                                        connected=False,
+                                        subjects_dir=subjects_dir)
+    assert_true(len(labels_lh) > 1)
+    assert_true(len(labels_rh) > 1)
+
     # with smooth='patch'
     with warnings.catch_warnings(record=True) as w:  # connectedness warning
         warnings.simplefilter('always')
@@ -438,7 +668,8 @@ def test_stc_to_label():
         assert_labels_equal(l1, l2, decimal=4)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_morph():
     """Test inter-subject label morphing
     """
@@ -452,7 +683,7 @@ def test_morph():
         # this should throw an error because the label has all zero values
         assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
         label.values.fill(1)
-        label.morph(None, 'fsaverage', 5, grade, subjects_dir, 2,
+        label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1,
                     copy=False)
         label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2,
                     copy=False)
@@ -461,14 +692,19 @@ def test_morph():
         vals.append(label.vertices)
     assert_array_equal(vals[0], vals[1])
     # make sure label smoothing can run
-    label.morph(label.subject, 'fsaverage', 5,
-                [np.arange(10242), np.arange(10242)], subjects_dir, 2,
-                copy=False)
-    # subject name should be inferred now
-    label.smooth(subjects_dir=subjects_dir)
-
-
-@sample.requires_sample_data
+    assert_equal(label.subject, 'sample')
+    verts = [np.arange(10242), np.arange(10242)]
+    for hemi in ['lh', 'rh']:
+        label.hemi = hemi
+        label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
+    assert_raises(TypeError, label.morph, None, 1, 5, verts,
+                  subjects_dir, 2)
+    assert_raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
+                  subjects_dir, 2)
+    label.smooth(subjects_dir=subjects_dir)  # make sure this runs
+
+
+@testing.requires_testing_data
 def test_grow_labels():
     """Test generation of circular source labels"""
     seeds = [0, 50000]
@@ -476,8 +712,7 @@ def test_grow_labels():
     should_be_in = [[49, 227], [51207, 48794]]
     hemis = [0, 1]
     names = ['aneurism', 'tumor']
-    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, n_jobs=2,
-                         names=names)
+    labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
 
     tgt_names = ['aneurism-lh', 'tumor-rh']
     tgt_hemis = ['lh', 'rh']
@@ -511,19 +746,4 @@ def test_grow_labels():
     assert_array_equal(l1.vertices, l0.vertices)
 
 
-@sample.requires_sample_data
-def test_label_time_course():
-    """Test extracting label data from SourceEstimate"""
-    values, times, vertices = label_time_courses(real_label_fname, stc_fname)
-    stc = read_source_estimate(stc_fname)
-    label_lh = read_label(real_label_fname)
-    stc_lh = stc.in_label(label_lh)
-    assert_array_almost_equal(stc_lh.data, values)
-    assert_array_almost_equal(stc_lh.times, times)
-    assert_array_almost_equal(stc_lh.vertno[0], vertices)
-
-    label_rh = read_label(real_label_rh_fname)
-    stc_rh = stc.in_label(label_rh)
-    label_bh = label_rh + label_lh
-    stc_bh = stc.in_label(label_bh)
-    assert_array_equal(stc_bh.data, np.vstack((stc_lh.data, stc_rh.data)))
+run_tests_if_main()
diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py
index 4c67da7..e9af0ed 100644
--- a/mne/tests/test_proj.py
+++ b/mne/tests/test_proj.py
@@ -1,5 +1,5 @@
 import os.path as op
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_raises
 import warnings
 
 import numpy as np
@@ -9,14 +9,17 @@ from numpy.testing import (assert_array_almost_equal, assert_allclose,
 import copy as cp
 
 import mne
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import pick_types
 from mne.io import Raw
 from mne import compute_proj_epochs, compute_proj_evoked, compute_proj_raw
-from mne.io.proj import make_projector, activate_proj
-from mne.proj import read_proj, write_proj, make_eeg_average_ref_proj
+from mne.io.proj import (make_projector, activate_proj,
+                         _needs_eeg_average_ref_proj)
+from mne.proj import (read_proj, write_proj, make_eeg_average_ref_proj,
+                      _has_eeg_average_ref_proj)
 from mne import read_events, Epochs, sensitivity_map, read_source_estimate
-from mne.utils import _TempDir
+from mne.utils import (_TempDir, run_tests_if_main, clean_warning_registry,
+                       slow_test)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
@@ -27,18 +30,16 @@ proj_fname = op.join(base_dir, 'test-proj.fif')
 proj_gz_fname = op.join(base_dir, 'test-proj.fif.gz')
 bads_fname = op.join(base_dir, 'test_bads.txt')
 
-data_path = sample.data_path(download=False)
-sample_path = op.join(data_path, 'MEG', 'sample')
-fwd_fname = op.join(sample_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
-sensmap_fname = op.join(sample_path, 'sample_audvis-%s-oct-6-fwd-sensmap-%s.w')
+sample_path = op.join(testing.data_path(download=False), 'MEG', 'sample')
+fwd_fname = op.join(sample_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
+sensmap_fname = op.join(sample_path,
+                        'sample_audvis_trunc-%s-oct-4-fwd-sensmap-%s.w')
 
 # sample dataset should be updated to reflect mne conventions
 eog_fname = op.join(sample_path, 'sample_audvis_eog_proj.fif')
 
-tempdir = _TempDir()
 
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_sensitivity_maps():
     """Test sensitivity map computation"""
     fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
@@ -78,10 +79,15 @@ def test_sensitivity_maps():
     # test corner case for EEG
     stc = sensitivity_map(fwd, projs=[make_eeg_average_ref_proj(fwd['info'])],
                           ch_type='eeg', exclude='bads')
+    # test volume source space
+    fname = op.join(sample_path, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
+    fwd = mne.read_forward_solution(fname)
+    sensitivity_map(fwd)
 
 
 def test_compute_proj_epochs():
     """Test SSP computation on epochs"""
+    tempdir = _TempDir()
     event_id, tmin, tmax = 1, -0.2, 0.3
 
     raw = Raw(raw_fname, preload=True)
@@ -136,12 +142,15 @@ def test_compute_proj_epochs():
     # XXX : test something
 
     # test parallelization
-    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2)
+    projs = compute_proj_epochs(epochs, n_grad=1, n_mag=1, n_eeg=0, n_jobs=2,
+                                desc_prefix='foobar')
+    assert_true(all('foobar' in x['desc'] for x in projs))
     projs = activate_proj(projs)
     proj_par, _, _ = make_projector(projs, epochs.ch_names, bads=[])
     assert_allclose(proj, proj_par, rtol=1e-8, atol=1e-16)
 
     # test warnings on bad filenames
+    clean_warning_registry()
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         proj_badname = op.join(tempdir, 'test-bad-name.fif.gz')
@@ -151,11 +160,14 @@ def test_compute_proj_epochs():
     assert_equal(len(w), 2)
 
 
+@slow_test
 def test_compute_proj_raw():
     """Test SSP computation on raw"""
+    tempdir = _TempDir()
     # Test that the raw projectors work
     raw_time = 2.5  # Do shorter amount for speed
-    raw = Raw(raw_fname, preload=True).crop(0, raw_time, False)
+    raw = Raw(raw_fname).crop(0, raw_time, False)
+    raw.load_data()
     for ii in (0.25, 0.5, 1, 2):
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter('always')
@@ -214,3 +226,53 @@ def test_compute_proj_raw():
     proj, nproj, U = make_projector(projs, raw.ch_names,
                                     bads=raw.ch_names)
     assert_array_almost_equal(proj, np.eye(len(raw.ch_names)))
+
+
+def test_make_eeg_average_ref_proj():
+    """Test EEG average reference projection"""
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=True)
+    eeg = mne.pick_types(raw.info, meg=False, eeg=True)
+
+    # No average EEG reference
+    assert_true(not np.all(raw._data[eeg].mean(axis=0) < 1e-19))
+
+    # Apply average EEG reference
+    car = make_eeg_average_ref_proj(raw.info)
+    reref = raw.copy()
+    reref.add_proj(car)
+    reref.apply_proj()
+    assert_array_almost_equal(reref._data[eeg].mean(axis=0), 0, decimal=19)
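+    # (the average-reference projector subtracts the instantaneous mean
+    #  over EEG channels, so the re-referenced mean is numerically zero)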
+
+    # Error when custom reference has already been applied
+    raw.info['custom_ref_applied'] = True
+    assert_raises(RuntimeError, make_eeg_average_ref_proj, raw.info)
+
+
+def test_has_eeg_average_ref_proj():
+    """Test checking whether an EEG average reference exists"""
+    assert_true(not _has_eeg_average_ref_proj([]))
+
+    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
+    assert_true(_has_eeg_average_ref_proj(raw.info['projs']))
+
+
+def test_needs_eeg_average_ref_proj():
+    """Test checking whether a recording needs an EEG average reference"""
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
+    assert_true(_needs_eeg_average_ref_proj(raw.info))
+
+    raw = Raw(raw_fname, add_eeg_ref=True, preload=False)
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+    # No EEG channels
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=True)
+    eeg = [raw.ch_names[c] for c in pick_types(raw.info, meg=False, eeg=True)]
+    raw.drop_channels(eeg)
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+    # Custom ref flag set
+    raw = Raw(raw_fname, add_eeg_ref=False, preload=False)
+    raw.info['custom_ref_applied'] = True
+    assert_true(not _needs_eeg_average_ref_proj(raw.info))
+
+run_tests_if_main()
diff --git a/mne/tests/test_report.py b/mne/tests/test_report.py
index db3da15..e708d82 100644
--- a/mne/tests/test_report.py
+++ b/mne/tests/test_report.py
@@ -1,101 +1,98 @@
-# Author: Mainak Jas <mainak at neuro.hut.fi>
+# Authors: Mainak Jas <mainak at neuro.hut.fi>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: BSD (3-clause)
-
 import os
 import os.path as op
 import glob
 import warnings
+import shutil
 
 from nose.tools import assert_true, assert_equal, assert_raises
 
-from mne import read_evokeds
-from mne.datasets import sample
-from mne.report import Report
+from mne import Epochs, read_events, pick_types, read_evokeds
 from mne.io import Raw
-from mne.utils import _TempDir
+from mne.datasets import testing
+from mne.report import Report
+from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
+                       requires_PIL, run_tests_if_main, slow_test)
+from mne.viz import plot_trans
 
-data_dir = sample.data_path(download=False)
-base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
-                               'data'))
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+data_dir = testing.data_path(download=False)
 subjects_dir = op.join(data_dir, 'subjects')
+report_dir = op.join(data_dir, 'MEG', 'sample')
+raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
+event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
+cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
+fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
+inv_fname = op.join(report_dir,
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
 
-raw_fname = op.join(base_dir, 'test_raw.fif')
-event_name = op.join(base_dir, 'test-eve.fif')
-evoked1_fname = op.join(base_dir, 'test-nf-ave.fif')
-evoked2_fname = op.join(base_dir, 'test-ave.fif')
+base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
+                               'data'))
+evoked_fname = op.join(base_dir, 'test-ave.fif')
 
 # Set our plotters to test mode
-import matplotlib
-matplotlib.use('Agg')  # for testing don't use X server
 
-os.environ['MNE_REPORT_TESTING'] = 'True'
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-tempdir = _TempDir()
-
 
+@slow_test
+@testing.requires_testing_data
+@requires_PIL
 def test_render_report():
     """Test rendering -*.fif files for mne report.
     """
+    tempdir = _TempDir()
+    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
+    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
+    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
+    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
+    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
+    for a, b in [[raw_fname, raw_fname_new],
+                 [event_fname, event_fname_new],
+                 [cov_fname, cov_fname_new],
+                 [fwd_fname, fwd_fname_new],
+                 [inv_fname, inv_fname_new]]:
+        shutil.copyfile(a, b)
+
+    # create and add -epo.fif and -ave.fif files
+    epochs_fname = op.join(tempdir, 'temp-epo.fif')
+    evoked_fname = op.join(tempdir, 'temp-ave.fif')
+    raw = Raw(raw_fname_new)
+    picks = pick_types(raw.info, meg='mag', eeg=False)  # faster with one type
+    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
+    epochs.save(epochs_fname)
+    epochs.average().save(evoked_fname)
 
-    report = Report(info_fname=raw_fname)
+    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
-        report.parse_folder(data_path=base_dir)
-    assert_true(len(w) == 1)
+        report.parse_folder(data_path=tempdir, on_error='raise')
+    assert_true(len(w) >= 1)
 
     # Check correct paths and filenames
-    assert_true(raw_fname in report.fnames)
-    assert_true(event_name in report.fnames)
-    assert_true(report.data_path == base_dir)
-
-    # Check if raw repr is printed correctly
-    raw = Raw(raw_fname)
-    raw_idx = [ii for ii, fname in enumerate(report.fnames)
-               if fname == raw_fname][0]
-    raw_html = report.html[raw_idx]
-    assert_true(raw_html.find(repr(raw)[1:-1]) != -1)
-    assert_true(raw_html.find(str(raw.info['sfreq'])) != -1)
-    assert_true(raw_html.find('class="raw"') != -1)
-    assert_true(raw_html.find(raw_fname) != -1)
-
-    # Check if all files were rendered in the report
-    fnames = glob.glob(op.join(base_dir, '*.fif'))
-    bad_name = 'test_ctf_comp_raw-eve.fif'
-    decrement = any(fname.endswith(bad_name) for fname in fnames)
-    fnames = [fname for fname in fnames if
-              fname.endswith(('-eve.fif', '-ave.fif', '-cov.fif',
-                              '-sol.fif', '-fwd.fif', '-inv.fif',
-                              '-src.fif', '-trans.fif', 'raw.fif',
-                              'sss.fif', '-epo.fif')) and
-              not fname.endswith(bad_name)]
-    # last file above gets created by another test, and it shouldn't be there
-
+    fnames = glob.glob(op.join(tempdir, '*.fif'))
     for fname in fnames:
+        assert_true(op.basename(fname) in
+                    [op.basename(x) for x in report.fnames])
         assert_true(''.join(report.html).find(op.basename(fname)) != -1)
 
     assert_equal(len(report.fnames), len(fnames))
     assert_equal(len(report.html), len(report.fnames))
 
-    evoked1 = read_evokeds(evoked1_fname)
-    evoked2 = read_evokeds(evoked2_fname)
-    assert_equal(len(report.fnames) + len(evoked1) + len(evoked2) - 2,
-                 report.initial_id - decrement)
-
     # Check saving functionality
     report.data_path = tempdir
     report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
     assert_true(op.isfile(op.join(tempdir, 'report.html')))
 
-    # Check add_section functionality
-    fig = evoked1[0].plot(show=False)
-    report.add_section(figs=fig,  # test non-list input
-                       captions=['evoked response'])
-    assert_equal(len(report.html), len(fnames) + 1)
+    assert_equal(len(report.html), len(fnames))
     assert_equal(len(report.html), len(report.fnames))
-    assert_raises(ValueError, report.add_section, figs=[fig, fig],
-                  captions='H')
 
     # Check saving same report to new filename
     report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
@@ -106,14 +103,164 @@ def test_render_report():
                 overwrite=True)
     assert_true(op.isfile(op.join(tempdir, 'report.html')))
 
+    # Check pattern matching with multiple patterns
+    pattern = ['*raw.fif', '*eve.fif']
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(data_path=tempdir, pattern=pattern)
+    assert_true(len(w) >= 1)
+
+    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
+        glob.glob(op.join(tempdir, '*eve.fif'))
+    for fname in fnames:
+        assert_true(op.basename(fname) in
+                    [op.basename(x) for x in report.fnames])
+        assert_true(''.join(report.html).find(op.basename(fname)) != -1)
+
+
+@testing.requires_testing_data
+@requires_mayavi
+@requires_PIL
+def test_render_add_sections():
+    """Test adding figures/images to section.
+    """
+    from PIL import Image
+    tempdir = _TempDir()
+    import matplotlib.pyplot as plt
+    report = Report(subjects_dir=subjects_dir)
+    # Check add_figs_to_section functionality
+    fig = plt.plot([1, 2], [1, 2])[0].figure
+    report.add_figs_to_section(figs=fig,  # test non-list input
+                               captions=['evoked response'], scale=1.2,
+                               image_format='svg')
+    assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
+                  captions='H')
+    assert_raises(ValueError, report.add_figs_to_section, figs=fig,
+                  captions=['foo'], scale=0, image_format='svg')
+    assert_raises(ValueError, report.add_figs_to_section, figs=fig,
+                  captions=['foo'], scale=1e-10, image_format='svg')
+    # need to recreate because calls above change size
+    fig = plt.plot([1, 2], [1, 2])[0].figure
+
+    # Check add_images_to_section with png and then gif
+    img_fname = op.join(tempdir, 'testimage.png')
+    fig.savefig(img_fname)
+    report.add_images_to_section(fnames=[img_fname],
+                                 captions=['evoked response'])
 
-@sample.requires_sample_data
+    im = Image.open(img_fname)
+    img_fname = op.join(tempdir, 'testimage.gif')
+    im.save(img_fname)  # matplotlib does not support gif
+    report.add_images_to_section(fnames=[img_fname],
+                                 captions=['evoked response'])
+
+    assert_raises(ValueError, report.add_images_to_section,
+                  fnames=[img_fname, img_fname], captions='H')
+
+    assert_raises(ValueError, report.add_images_to_section,
+                  fnames=['foobar.xxx'], captions='H')
+
+    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
+                          baseline=(-0.2, 0.0))
+    fig = plot_trans(evoked.info, trans_fname, subject='sample',
+                     subjects_dir=subjects_dir)
+
+    report.add_figs_to_section(figs=fig,  # test non-list input
+                               captions='random image', scale=1.2)
+
+
+@slow_test
+@testing.requires_testing_data
+@requires_mayavi
+@requires_nibabel()
 def test_render_mri():
     """Test rendering MRI for mne report.
     """
+    tempdir = _TempDir()
+    trans_fname_new = op.join(tempdir, 'temp-trans.fif')
+    for a, b in [[trans_fname, trans_fname_new]]:
+        shutil.copyfile(a, b)
     report = Report(info_fname=raw_fname,
                     subject='sample', subjects_dir=subjects_dir)
     with warnings.catch_warnings(record=True):
         warnings.simplefilter('always')
-        report.parse_folder(data_path=data_dir,
-                            pattern='*sample_audvis_raw-trans.fif')
+        report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
+                            n_jobs=2)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+
+@testing.requires_testing_data
+@requires_nibabel()
+def test_render_mri_without_bem():
+    """Test rendering MRI without BEM for mne report.
+    """
+    tempdir = _TempDir()
+    os.mkdir(op.join(tempdir, 'sample'))
+    os.mkdir(op.join(tempdir, 'sample', 'mri'))
+    shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=tempdir)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        report.parse_folder(tempdir)
+    assert_true(len(w) >= 1)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+
+@testing.requires_testing_data
+@requires_nibabel()
+def test_add_htmls_to_section():
+    """Test adding html str to mne report.
+    """
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    html = '<b>MNE-Python is AWESOME</b>'
+    caption, section = 'html', 'html_section'
+    report.add_htmls_to_section(html, caption, section)
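+    # sections are stored internally with a 'report_' prefix in _sectionlabels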
+    idx = report._sectionlabels.index('report_' + section)
+    html_compare = report.html[idx]
+    assert_true(html in html_compare)
+
+
+def test_add_slider_to_section():
+    """Test adding a slider with a series of images to mne report.
+    """
+    tempdir = _TempDir()
+    from matplotlib import pyplot as plt
+    report = Report(info_fname=raw_fname,
+                    subject='sample', subjects_dir=subjects_dir)
+    section = 'slider_section'
+    figs = list()
+    figs.append(plt.figure())
+    plt.plot([1, 2, 3])
+    plt.close('all')
+    figs.append(plt.figure())
+    plt.plot([3, 2, 1])
+    plt.close('all')
+    report.add_slider_to_section(figs, section=section)
+    report.save(op.join(tempdir, 'report.html'), open_browser=False)
+
+    assert_raises(NotImplementedError, report.add_slider_to_section,
+                  [figs, figs])
+    assert_raises(ValueError, report.add_slider_to_section, figs, ['wug'])
+    assert_raises(TypeError, report.add_slider_to_section, figs, 'wug')
+
+
+def test_validate_input():
+    report = Report()
+    items = ['a', 'b', 'c']
+    captions = ['Letter A', 'Letter B', 'Letter C']
+    section = 'ABCs'
+    comments = ['First letter of the alphabet.',
+                'Second letter of the alphabet',
+                'Third letter of the alphabet']
+    assert_raises(ValueError, report._validate_input, items, captions[:-1],
+                  section, comments=None)
+    assert_raises(ValueError, report._validate_input, items, captions, section,
+                  comments=comments[:-1])
+    values = report._validate_input(items, captions, section, comments=None)
+    items_new, captions_new, comments_new = values
+    assert_equal(len(comments_new), len(items))
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py
index 6360ae5..6fa9fdd 100644
--- a/mne/tests/test_source_estimate.py
+++ b/mne/tests/test_source_estimate.py
@@ -10,39 +10,46 @@ from numpy.testing import (assert_array_almost_equal, assert_array_equal,
 
 from scipy.fftpack import fft
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
-                 read_source_spaces)
+                 read_source_spaces, MixedSourceEstimate)
 from mne import read_source_estimate, morph_data, extract_label_time_course
 from mne.source_estimate import (spatio_temporal_tris_connectivity,
                                  spatio_temporal_src_connectivity,
-                                 compute_morph_matrix, grade_to_vertices)
+                                 compute_morph_matrix, grade_to_vertices,
+                                 grade_to_tris)
 
 from mne.minimum_norm import read_inverse_operator
 from mne.label import read_labels_from_annot, label_sign_flip
 from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
-                       requires_pytables)
+                       requires_h5py, run_tests_if_main, slow_test)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
-fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
 fname_inv = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-meg-oct-6-meg-inv.fif')
+                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
+fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
+fname_src = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
+fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg')
+fname_smorph = op.join(data_path, 'MEG', 'sample',
+                       'sample_audvis_trunc-meg')
+fname_fmorph = op.join(data_path, 'MEG', 'sample',
+                       'fsaverage_audvis_trunc-meg')
 fname_vol = op.join(data_path, 'MEG', 'sample',
-                    'sample_audvis-grad-vol-7-fwd-sensmap-vol.w')
+                    'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
 fname_vsrc = op.join(data_path, 'MEG', 'sample',
-                     'sample_audvis-meg-vol-7-fwd.fif')
-fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
-
-tempdir = _TempDir()
+                     'sample_audvis_trunc-meg-vol-7-fwd.fif')
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_volume_stc():
     """Test volume STCs
     """
+    tempdir = _TempDir()
     N = 100
     data = np.arange(N)[:, np.newaxis]
     datas = [data, data, np.arange(2)[:, np.newaxis]]
@@ -57,7 +64,7 @@ def test_volume_stc():
             stc_new.save(fname_temp)
             stc_new = read_source_estimate(fname_temp)
             assert_true(isinstance(stc_new, VolSourceEstimate))
-            assert_array_equal(vertno_read, stc_new.vertno)
+            assert_array_equal(vertno_read, stc_new.vertices)
             assert_array_almost_equal(stc.data, stc_new.data)
 
     # now let's actually read a MNE-C processed file
@@ -72,7 +79,7 @@ def test_volume_stc():
         stc_new.save(fname_temp, ftype='w')
         stc_new = read_source_estimate(fname_temp)
         assert_true(isinstance(stc_new, VolSourceEstimate))
-        assert_array_equal(stc.vertno, stc_new.vertno)
+        assert_array_equal(stc.vertices, stc_new.vertices)
         assert_array_almost_equal(stc.data, stc_new.data)
 
     # save the stc as a nifti file and export
@@ -108,19 +115,22 @@ def test_volume_stc():
         print('Save as nifti test skipped, needs NiBabel')
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_expand():
     """Test stc expansion
     """
-    stc = read_source_estimate(fname, 'sample')
+    stc = read_source_estimate(fname_stc, 'sample')
     assert_true('sample' in repr(stc))
     labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                        subjects_dir=subjects_dir)
-    stc_limited = stc.in_label(labels_lh[0] + labels_lh[1])
+    new_label = labels_lh[0] + labels_lh[1]
+    stc_limited = stc.in_label(new_label)
     stc_new = stc_limited.copy()
     stc_new.data.fill(0)
     for label in labels_lh[:2]:
-        stc_new += stc.in_label(label).expand(stc_limited.vertno)
+        stc_new += stc.in_label(label).expand(stc_limited.vertices)
+    assert_raises(TypeError, stc_new.expand, stc_limited.vertices[0])
+    assert_raises(ValueError, stc_new.expand, [stc_limited.vertices[0]])
     # make sure we can't add unless vertno agree
     assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
 
@@ -133,22 +143,24 @@ def _fake_stc(n_time=10):
 def test_io_stc():
     """Test IO for STC files
     """
+    tempdir = _TempDir()
     stc = _fake_stc()
     stc.save(op.join(tempdir, "tmp.stc"))
     stc2 = read_source_estimate(op.join(tempdir, "tmp.stc"))
 
     assert_array_almost_equal(stc.data, stc2.data)
     assert_array_almost_equal(stc.tmin, stc2.tmin)
-    assert_equal(len(stc.vertno), len(stc2.vertno))
-    for v1, v2 in zip(stc.vertno, stc2.vertno):
+    assert_equal(len(stc.vertices), len(stc2.vertices))
+    for v1, v2 in zip(stc.vertices, stc2.vertices):
         assert_array_almost_equal(v1, v2)
     assert_array_almost_equal(stc.tstep, stc2.tstep)
 
 
-@requires_pytables()
+@requires_h5py
 def test_io_stc_h5():
     """Test IO for STC files using HDF5
     """
+    tempdir = _TempDir()
     stc = _fake_stc()
     assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
     out_name = op.join(tempdir, 'tmp')
@@ -161,14 +173,15 @@ def test_io_stc_h5():
         assert_array_equal(stc_new.data, stc.data)
         assert_array_equal(stc_new.tmin, stc.tmin)
         assert_array_equal(stc_new.tstep, stc.tstep)
-        assert_equal(len(stc_new.vertno), len(stc.vertno))
-        for v1, v2 in zip(stc_new.vertno, stc.vertno):
+        assert_equal(len(stc_new.vertices), len(stc.vertices))
+        for v1, v2 in zip(stc_new.vertices, stc.vertices):
             assert_array_equal(v1, v2)
 
 
 def test_io_w():
     """Test IO for w files
     """
+    tempdir = _TempDir()
     stc = _fake_stc(n_time=1)
     w_fname = op.join(tempdir, 'fake')
     stc.save(w_fname, ftype='w')
@@ -201,7 +214,13 @@ def test_stc_arithmetic():
         a -= 1
         a *= -1
         a /= 2
-        a **= 3
+        b = 2 + a
+        b = 2 - a
+        b = +a
+        assert_array_equal(b.data, a.data)
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter('always')
+            a **= 3
         out.append(a)
 
     assert_array_equal(out[0], out[1].data)
@@ -211,12 +230,12 @@ def test_stc_arithmetic():
     assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_stc_methods():
     """Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
     """
-    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
-    stc = read_source_estimate(fname)
+    stc = read_source_estimate(fname_stc)
 
     # lh_data / rh_data
     assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
@@ -233,10 +252,31 @@ def test_stc_methods():
     vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
     assert_true(hemi == 1)
     # XXX Should design a fool-proof test case, but here were the results:
-    assert_true(vertex == 90186)
-    assert_true(np.round(t, 3) == 0.123)
+    assert_equal(vertex, 124791)
+    assert_equal(np.round(t, 2), 0.12)
+
+    stc = read_source_estimate(fname_stc)
+    stc.subject = 'sample'
+    label_lh = read_labels_from_annot('sample', 'aparc', 'lh',
+                                      subjects_dir=subjects_dir)[0]
+    label_rh = read_labels_from_annot('sample', 'aparc', 'rh',
+                                      subjects_dir=subjects_dir)[0]
+    label_both = label_lh + label_rh
+    for label in (label_lh, label_rh, label_both):
+        assert_true(isinstance(stc.shape, tuple) and len(stc.shape) == 2)
+        stc_label = stc.in_label(label)
+        if label.hemi != 'both':
+            if label.hemi == 'lh':
+                verts = stc_label.vertices[0]
+            else:  # label.hemi == 'rh':
+                verts = stc_label.vertices[1]
+            n_vertices_used = len(label.get_vertices_used(verts))
+            assert_equal(len(stc_label.data), n_vertices_used)
+    stc_lh = stc.in_label(label_lh)
+    assert_raises(ValueError, stc_lh.in_label, label_rh)
+    label_lh.subject = 'foo'
+    assert_raises(RuntimeError, stc.in_label, label_lh)
 
-    stc = read_source_estimate(fname)
     stc_new = deepcopy(stc)
     o_sfreq = 1.0 / stc.tstep
     # note that using no padding for this STC reduces edge ringing...
@@ -249,7 +289,7 @@ def test_stc_methods():
     assert_array_almost_equal(stc_new.data, stc.data, 5)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_extract_label_time_course():
     """Test extraction of label time courses from stc
     """
@@ -342,19 +382,21 @@ def test_extract_label_time_course():
     assert_true(x.size == 0)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_morph_data():
     """Test morphing of data
     """
+    tempdir = _TempDir()
     subject_from = 'sample'
     subject_to = 'fsaverage'
-    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
-    stc_from = read_source_estimate(fname, subject='sample')
-    fname = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis-meg')
-    stc_to = read_source_estimate(fname)
+    stc_from = read_source_estimate(fname_smorph, subject='sample')
+    stc_to = read_source_estimate(fname_fmorph)
     # make sure we can specify grade
     stc_from.crop(0.09, 0.1)  # for faster computation
     stc_to.crop(0.09, 0.1)  # for faster computation
+    assert_raises(ValueError, stc_from.morph, subject_to, grade=3, smooth=-1,
+                  subjects_dir=subjects_dir)
     stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
                              subjects_dir=subjects_dir)
     stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
@@ -368,16 +410,39 @@ def test_morph_data():
     stc_to3 = morph_data(subject_from, subject_to, stc_from,
                          grade=vertices_to, smooth=12, buffer_size=3,
                          subjects_dir=subjects_dir)
+    # make sure we get a warning about # of steps
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        morph_data(subject_from, subject_to, stc_from,
+                   grade=vertices_to, smooth=1, buffer_size=3,
+                   subjects_dir=subjects_dir)
+    assert_equal(len(w), 2)
 
     assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
     assert_array_almost_equal(stc_to1.data, stc_to2.data)
     assert_array_almost_equal(stc_to1.data, stc_to3.data)
     # make sure precomputed morph matrices work
     morph_mat = compute_morph_matrix(subject_from, subject_to,
-                                     stc_from.vertno, vertices_to,
+                                     stc_from.vertices, vertices_to,
                                      smooth=12, subjects_dir=subjects_dir)
     stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
     assert_array_almost_equal(stc_to1.data, stc_to3.data)
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, vertices_to, 'foo')
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, [vertices_to[0]], morph_mat)
+    assert_raises(ValueError, stc_from.morph_precomputed,
+                  subject_to, [vertices_to[0][:-1], vertices_to[1]], morph_mat)
+    assert_raises(ValueError, stc_from.morph_precomputed, subject_to,
+                  vertices_to, morph_mat, subject_from='foo')
+
+    # steps warning
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        compute_morph_matrix(subject_from, subject_to,
+                             stc_from.vertices, vertices_to,
+                             smooth=1, subjects_dir=subjects_dir)
+    assert_equal(len(w), 2)
 
     mean_from = stc_from.data.mean(axis=0)
     mean_to = stc_to1.data.mean(axis=0)
@@ -388,19 +453,10 @@ def test_morph_data():
                          smooth=12, buffer_size=3, subjects_dir=subjects_dir)
     assert_true(stc_to5.data.shape[0] == 163842 + 163842)
 
-    # test morphing to the same subject
-    stc_to6 = stc_from.morph(subject_from, grade=stc_from.vertno, smooth=1,
-                             subjects_dir=subjects_dir)
-    mask = np.ones(stc_from.data.shape[0], dtype=np.bool)
-    # XXX: there is a bug somewhere that causes a difference at 2 vertices..
-    mask[6799] = False
-    mask[6800] = False
-    assert_array_almost_equal(stc_from.data[mask], stc_to6.data[mask], 5)
-
     # Morph sparse data
     # Make a sparse stc
-    stc_from.vertno[0] = stc_from.vertno[0][[100, 500]]
-    stc_from.vertno[1] = stc_from.vertno[1][[200]]
+    stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
+    stc_from.vertices[1] = stc_from.vertices[1][[200]]
     stc_from._data = stc_from._data[:3]
 
     assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
@@ -416,7 +472,7 @@ def test_morph_data():
     assert_equal(stc_from.tmin, stc_from.tmin)
     assert_equal(stc_from.tstep, stc_from.tstep)
 
-    stc_from.vertno[0] = np.array([], dtype=np.int64)
+    stc_from.vertices[0] = np.array([], dtype=np.int64)
     stc_from._data = stc_from._data[:1]
 
     stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
@@ -479,7 +535,7 @@ def test_transform():
     stcs_t = stc.transform(_my_trans, copy=True)
     assert_true(isinstance(stcs_t, list))
     assert_array_equal(stc.times, stcs_t[0].times)
-    assert_equal(stc.vertno, stcs_t[0].vertno)
+    assert_equal(stc.vertices, stcs_t[0].vertices)
 
     data = np.concatenate((stcs_t[0].data[:, :, None],
                            stcs_t[1].data[:, :, None]), axis=2)
@@ -506,8 +562,8 @@ def test_transform():
     stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
     assert_true(isinstance(stc, SourceEstimate))
     assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
-    assert_true(len(stc.vertno[0]) == 0)
-    assert_equal(stc.vertno[1], verts_rh)
+    assert_true(len(stc.vertices[0]) == 0)
+    assert_equal(stc.vertices[1], verts_rh)
     assert_array_equal(stc.data, data_t)
 
     times = np.round(1000 * stc.times)
@@ -535,7 +591,7 @@ def test_spatio_temporal_tris_connectivity():
         assert_array_equal(c, n)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_spatio_temporal_src_connectivity():
     """Test spatio-temporal connectivity from source spaces"""
     tris = np.array([[0, 1, 2], [3, 4, 5]])
@@ -565,9 +621,11 @@ def test_spatio_temporal_src_connectivity():
     b = sum([s['nuse'] for s in inverse_operator['src']])
     assert_true(a == b)
 
+    assert_equal(grade_to_tris(5).shape, [40960, 3])
+
 
 @requires_pandas
-def test_as_data_frame():
+def test_to_data_frame():
     """Test stc Pandas exporter"""
     n_vert, n_times = 10, 5
     vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
@@ -577,15 +635,15 @@ def test_as_data_frame():
     stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
                                 subject='sample')
     for stc in [stc_surf, stc_vol]:
-        assert_raises(ValueError, stc.as_data_frame, index=['foo', 'bar'])
+        assert_raises(ValueError, stc.to_data_frame, index=['foo', 'bar'])
         for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
-            df = stc.as_data_frame(index=ind)
+            df = stc.to_data_frame(index=ind)
             assert_true(df.index.names == (ind if isinstance(ind, list)
                                            else [ind]))
             assert_array_equal(df.values.T[ncat:], stc.data)
             # test that non-indexed data are present as categorical variables
-            with warnings.catch_warnings(record=True):  # pandas
-                df.reset_index().columns[:3] == ['subject', 'time']
+            assert_true(all(c in ['time', 'subject']
+                            for c in df.reset_index().columns[:2]))
 
 
 def test_get_peak():
@@ -606,7 +664,7 @@ def test_get_peak():
         assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
 
         vert_idx, time_idx = stc.get_peak()
-        vertno = np.concatenate(stc.vertno) if ii == 0 else stc.vertno
+        vertno = np.concatenate(stc.vertices) if ii == 0 else stc.vertices
         assert_true(vert_idx in vertno)
         assert_true(time_idx in stc.times)
 
@@ -614,3 +672,29 @@ def test_get_peak():
                                         time_as_index=True)
         assert_true(vert_idx < stc.data.shape[0])
         assert_true(time_idx < len(stc.times))
+
+
+ at testing.requires_testing_data
+def test_mixed_stc():
+    """Test source estimate from mixed source space
+    """
+    N = 90  # number of sources
+    T = 2  # number of time points
+    S = 3  # number of source spaces
+
+    data = np.random.randn(N, T)
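+    # S identical source spaces, each contributing a block of N // S vertices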
+    vertno = S * [np.arange(N // S)]
+
+    # make sure error is raised if vertices are not a list of length >= 2
+    assert_raises(ValueError, MixedSourceEstimate, data=data,
+                  vertices=[np.arange(N)])
+
+    stc = MixedSourceEstimate(data, vertno, 0, 1)
+
+    vol = read_source_spaces(fname_vsrc)
+
+    # make sure error is raised for plotting surface with volume source
+    assert_raises(ValueError, stc.plot_surface, src=vol)
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py
index 458338c..8fefdf2 100644
--- a/mne/tests/test_source_space.py
+++ b/mne/tests/test_source_space.py
@@ -7,38 +7,44 @@ from nose.plugins.skip import SkipTest
 import numpy as np
 from numpy.testing import assert_array_equal, assert_allclose, assert_equal
 import warnings
-from scipy.spatial.distance import cdist
 
-from mne.datasets import sample
+from mne.datasets import testing
 from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
                  setup_source_space, setup_volume_source_space,
-                 add_source_space_distances)
+                 add_source_space_distances, read_bem_surfaces,
+                 morph_source_spaces, SourceEstimate)
 from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel,
-                       requires_freesurfer, run_subprocess,
-                       requires_mne, requires_scipy_version)
+                       requires_freesurfer, run_subprocess, slow_test,
+                       requires_mne, requires_version, run_tests_if_main)
 from mne.surface import _accumulate_normals, _triangle_neighbors
 from mne.source_space import _get_mgz_header
 from mne.externals.six.moves import zip
+from mne.source_space import (get_volume_labels_from_aseg, SourceSpaces,
+                              _compare_source_spaces)
+from mne.io.constants import FIFF
 
 warnings.simplefilter('always')
 
-# WARNING: test_source_space is imported by forward, so download=False
-# is critical here, otherwise on first import of MNE users will have to
-# download the whole sample dataset!
-base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
-fname_small = op.join(base_dir, 'small-src.fif.gz')
+fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
 fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
+fname_vol = op.join(subjects_dir, 'sample', 'bem',
+                    'sample-volume-7mm-src.fif')
 fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
-                    'sample-5120-bem.fif')
-fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
+                    'sample-1280-bem.fif')
+fname_fs = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')
+fname_morph = op.join(subjects_dir, 'sample', 'bem',
+                      'sample-fsaverage-ico-5-src.fif')
 
-tempdir = _TempDir()
+base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
+fname_small = op.join(base_dir, 'small-src.fif.gz')
 
 
+@testing.requires_testing_data
 @requires_nibabel(vox2ras_tkr=True)
 def test_mgz_header():
+    """Test MGZ header reading"""
     import nibabel as nib
     header = _get_mgz_header(fname_mri)
     mri_hdr = nib.load(fname_mri).get_header()
@@ -47,7 +53,7 @@ def test_mgz_header():
     assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox'])
 
 
-@requires_scipy_version('0.11')
+@requires_version('scipy', '0.11')
 def test_add_patch_info():
     """Test adding patch info to source space"""
     # let's setup a small source space
@@ -79,10 +85,11 @@ def test_add_patch_info():
             assert_array_equal(p1, p2)
 
 
-@sample.requires_sample_data
-@requires_scipy_version('0.11')
+@testing.requires_testing_data
+@requires_version('scipy', '0.11')
 def test_add_source_space_distances_limited():
     """Test adding distances to source space with a dist_limit"""
+    tempdir = _TempDir()
     src = read_source_spaces(fname)
     src_new = read_source_spaces(fname)
     del src_new[0]['dist']
@@ -116,19 +123,23 @@ def test_add_source_space_distances_limited():
         assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
 
 
-@sample.requires_sample_data
-@requires_scipy_version('0.11')
+@slow_test
+@testing.requires_testing_data
+@requires_version('scipy', '0.11')
 def test_add_source_space_distances():
     """Test adding distances to source space"""
+    tempdir = _TempDir()
     src = read_source_spaces(fname)
     src_new = read_source_spaces(fname)
     del src_new[0]['dist']
     del src_new[1]['dist']
-    n_do = 20  # limit this for speed
+    n_do = 19  # limit this for speed
     src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
     src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
     out_name = op.join(tempdir, 'temp-src.fif')
-    add_source_space_distances(src_new)
+    n_jobs = 2
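+    # n_do is deliberately not divisible by n_jobs to exercise uneven splits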
+    assert_true(n_do % n_jobs != 0)
+    add_source_space_distances(src_new, n_jobs=n_jobs)
     write_source_spaces(out_name, src_new)
     src_new = read_source_spaces(out_name)
 
@@ -156,11 +167,12 @@ def test_add_source_space_distances():
         assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_mne
 def test_discrete_source_space():
     """Test setting up (and reading/writing) discrete source spaces
     """
+    tempdir = _TempDir()
     src = read_source_spaces(fname)
     v = src[0]['vertno']
 
@@ -197,47 +209,65 @@ def test_discrete_source_space():
             os.remove(temp_name)
 
 
-@sample.requires_sample_data
-@requires_mne
+@slow_test
+@testing.requires_testing_data
 def test_volume_source_space():
     """Test setting up volume source spaces
     """
-    fname_vol = op.join(data_path, 'subjects', 'sample', 'bem',
-                        'volume-7mm-src.fif')
+    tempdir = _TempDir()
     src = read_source_spaces(fname_vol)
     temp_name = op.join(tempdir, 'temp-src.fif')
-    try:
-        # The one in the sample dataset (uses bem as bounds)
+    surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
+    surf['rr'] *= 1e3  # convert to mm
+    # The one in the testing dataset (uses bem as bounds)
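+    # first pass bounds the grid with the BEM file, second pass with the
+    # in-memory surface loaded above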
+    for bem, surf in zip((fname_bem, None), (None, surf)):
         src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
-                                            bem=fname_bem, mri=fname_mri,
+                                            bem=bem, surface=surf,
+                                            mri=fname_mri,
                                             subjects_dir=subjects_dir)
         _compare_source_spaces(src, src_new, mode='approx')
+        del src_new
         src_new = read_source_spaces(temp_name)
         _compare_source_spaces(src, src_new, mode='approx')
+    assert_raises(IOError, setup_volume_source_space, 'sample', temp_name,
+                  pos=7.0, bem=None, surface='foo',  # bad surf
+                  mri=fname_mri, subjects_dir=subjects_dir)
 
-        # let's try the spherical one (no bem or surf supplied)
-        run_subprocess(['mne_volume_source_space',
-                        '--grid', '15.0',
-                        '--src', temp_name,
-                        '--mri', fname_mri])
-        src = read_source_spaces(temp_name)
-        src_new = setup_volume_source_space('sample', temp_name, pos=15.0,
-                                            mri=fname_mri,
-                                            subjects_dir=subjects_dir)
-        _compare_source_spaces(src, src_new, mode='approx')
 
-        # now without MRI argument, it should give an error when we try
-        # to read it
-        run_subprocess(['mne_volume_source_space',
-                        '--grid', '15.0',
-                        '--src', temp_name])
-        assert_raises(ValueError, read_source_spaces, temp_name)
-    finally:
-        if op.isfile(temp_name):
-            os.remove(temp_name)
+@testing.requires_testing_data
+@requires_mne
+def test_other_volume_source_spaces():
+    """Test setting up other volume source spaces"""
+    # these are split off because they require the MNE tools, and
+    # Travis doesn't seem to like them
+
+    # let's try the spherical one (no bem or surf supplied)
+    tempdir = _TempDir()
+    temp_name = op.join(tempdir, 'temp-src.fif')
+    run_subprocess(['mne_volume_source_space',
+                    '--grid', '7.0',
+                    '--src', temp_name,
+                    '--mri', fname_mri])
+    src = read_source_spaces(temp_name)
+    src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
+                                        mri=fname_mri,
+                                        subjects_dir=subjects_dir)
+    _compare_source_spaces(src, src_new, mode='approx')
+    del src
+    del src_new
+    assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
+                  pos=7.0, sphere=[1., 1.], mri=fname_mri,  # bad sphere
+                  subjects_dir=subjects_dir)
 
+    # now without MRI argument, it should give an error when we try
+    # to read it
+    run_subprocess(['mne_volume_source_space',
+                    '--grid', '7.0',
+                    '--src', temp_name])
+    assert_raises(ValueError, read_source_spaces, temp_name)
 
-@sample.requires_sample_data
+
+@testing.requires_testing_data
 def test_triangle_neighbors():
     """Test efficient vertex neighboring triangles for surfaces"""
     this = read_source_spaces(fname)[0]
@@ -280,12 +310,12 @@ def test_accumulate_normals():
     assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_setup_source_space():
     """Test setting up ico, oct, and all source spaces
     """
-    fname_all = op.join(data_path, 'subjects', 'sample', 'bem',
-                        'sample-all-src.fif')
+    tempdir = _TempDir()
     fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
                         'fsaverage-ico-5-src.fif')
     # first lets test some input params
@@ -311,6 +341,8 @@ def test_setup_source_space():
                                      subjects_dir=subjects_dir, add_dist=False,
                                      overwrite=True)
     _compare_source_spaces(src, src_new, mode='approx')
+    assert_array_equal(src[0]['vertno'], np.arange(10242))
+    assert_array_equal(src[1]['vertno'], np.arange(10242))
 
     # oct-6 (sample) - auto filename + IO
     src = read_source_spaces(fname)
@@ -325,17 +357,21 @@ def test_setup_source_space():
     _compare_source_spaces(src, src_new, mode='approx')
 
     # all source points - no file writing
-    src = read_source_spaces(fname_all)
     src_new = setup_source_space('sample', None, spacing='all',
                                  subjects_dir=subjects_dir, add_dist=False)
-    _compare_source_spaces(src, src_new, mode='approx')
+    assert_true(src_new[0]['nuse'] == len(src_new[0]['rr']))
+    assert_true(src_new[1]['nuse'] == len(src_new[1]['rr']))
+
+    # dense source space to hit surf['inuse'] lines of _create_surf_spacing
+    assert_raises(RuntimeError, setup_source_space, 'sample', None,
+                  spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_read_source_spaces():
     """Test reading of source space meshes
     """
-    src = read_source_spaces(fname, add_geom=True)
+    src = read_source_spaces(fname, patch_stats=True)
 
     # 3D source space
     lh_points = src[0]['rr']
@@ -354,13 +390,16 @@ def test_read_source_spaces():
     assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_write_source_space():
-    """Test writing and reading of source spaces
+    """Test reading and writing of source spaces
     """
-    src0 = read_source_spaces(fname, add_geom=False)
+    tempdir = _TempDir()
+    src0 = read_source_spaces(fname, patch_stats=False)
     write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0)
-    src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'), add_geom=False)
+    src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'),
+                              patch_stats=False)
     _compare_source_spaces(src0, src1)
 
     # test warnings on bad filenames
@@ -369,130 +408,235 @@ def test_write_source_space():
         src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
         write_source_spaces(src_badname, src0)
         read_source_spaces(src_badname)
-        print([ww.message for ww in w])
     assert_equal(len(w), 2)
 
 
-def _compare_source_spaces(src0, src1, mode='exact'):
-    """Compare two source spaces
-
-    Note: this function is also used by forward/tests/test_make_forward.py
-    """
-    for s0, s1 in zip(src0, src1):
-        for name in ['nuse', 'ntri', 'np', 'type', 'id']:
-            print(name)
-            assert_equal(s0[name], s1[name])
-        for name in ['subject_his_id']:
-            if name in s0 or name in s1:
-                print(name)
-                assert_equal(s0[name], s1[name])
-        for name in ['interpolator']:
-            if name in s0 or name in s1:
-                print(name)
-                diffs = (s0['interpolator'] - s1['interpolator']).data
-                if len(diffs) > 0:
-                    assert_true(np.sqrt(np.mean(diffs ** 2)) < 0.05)  # 5%
-        for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
-            print(name)
-            if s0[name] is None:
-                assert_true(s1[name] is None)
-            else:
-                if mode == 'exact':
-                    assert_array_equal(s0[name], s1[name])
-                elif mode == 'approx':
-                    assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-4)
-                else:
-                    raise RuntimeError('unknown mode')
-        if mode == 'exact':
-            for name in ['inuse', 'vertno', 'use_tris']:
-                assert_array_equal(s0[name], s1[name])
-            # these fields will exist if patch info was added, these are
-            # not tested in mode == 'approx'
-            for name in ['nearest', 'nearest_dist']:
-                print(name)
-                if s0[name] is None:
-                    assert_true(s1[name] is None)
-                else:
-                    assert_array_equal(s0[name], s1[name])
-            for name in ['dist_limit']:
-                print(name)
-                assert_true(s0[name] == s1[name])
-            for name in ['dist']:
-                if s0[name] is not None:
-                    assert_equal(s1[name].shape, s0[name].shape)
-                    assert_true(len((s0['dist'] - s1['dist']).data) == 0)
-            for name in ['pinfo']:
-                if s0[name] is not None:
-                    assert_true(len(s0[name]) == len(s1[name]))
-                    for p1, p2 in zip(s0[name], s1[name]):
-                        assert_true(all(p1 == p2))
-        elif mode == 'approx':
-            # deal with vertno, inuse, and use_tris carefully
-            assert_array_equal(s0['vertno'], np.where(s0['inuse'])[0])
-            assert_array_equal(s1['vertno'], np.where(s1['inuse'])[0])
-            assert_equal(len(s0['vertno']), len(s1['vertno']))
-            agreement = np.mean(s0['inuse'] == s1['inuse'])
-            assert_true(agreement > 0.99)
-            if agreement < 1.0:
-                # make sure mismatched vertno are within 1.5mm
-                v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
-                v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
-                dists = cdist(s0['rr'][v0], s1['rr'][v1])
-                assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
-                                atol=1.5e-3)
-            if s0['use_tris'] is not None:  # for "spacing"
-                assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
-            else:
-                assert_true(s1['use_tris'] is None)
-            assert_true(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
-    # The above "if s0[name] is not None" can be removed once the sample
-    # dataset is updated to have a source space with distance info
-    for name in ['working_dir', 'command_line']:
-        if mode == 'exact':
-            assert_equal(src0.info[name], src1.info[name])
-        elif mode == 'approx':
-            print(name)
-            if name in src0.info:
-                assert_true(name in src1.info)
-            else:
-                assert_true(name not in src1.info)
-
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_fs_or_nibabel
 def test_vertex_to_mni():
     """Test conversion of vertices to MNI coordinates
     """
-    # obtained using "tksurfer (sample/fsaverage) (l/r)h white"
+    # obtained using "tksurfer (sample) (l/r)h white"
     vertices = [100960, 7620, 150549, 96761]
-    coords_s = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
-                         [-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
-    coords_f = np.array([[-41.28, -40.04, 18.20], [-6.05, 49.74, -18.15],
-                         [-61.71, -14.55, 20.52], [21.70, -60.84, 25.02]])
+    coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
+                       [-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
     hemis = [0, 0, 0, 1]
-    for coords, subject in zip([coords_s, coords_f], ['sample', 'fsaverage']):
-        coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir)
-        # less than 1mm error
-        assert_allclose(coords, coords_2, atol=1.0)
+    coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
+    # less than 1mm error
+    assert_allclose(coords, coords_2, atol=1.0)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 @requires_freesurfer
 @requires_nibabel()
 def test_vertex_to_mni_fs_nibabel():
     """Test equivalence of vert_to_mni for nibabel and freesurfer
     """
     n_check = 1000
-    for subject in ['sample', 'fsaverage']:
-        vertices = np.random.randint(0, 100000, n_check)
-        hemis = np.random.randint(0, 1, n_check)
-        coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
-                               'nibabel')
-        coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
-                                 'freesurfer')
-        # less than 0.1 mm error
-        assert_allclose(coords, coords_2, atol=0.1)
+    subject = 'sample'
+    vertices = np.random.randint(0, 100000, n_check)
+    hemis = np.random.randint(0, 1, n_check)
+    coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                           'nibabel')
+    coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
+                             'freesurfer')
+    # less than 0.1 mm error
+    assert_allclose(coords, coords_2, atol=0.1)
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_get_volume_label_names():
+    """Test reading volume label names
+    """
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
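+    # 'Brain-Stem' is a standard aseg structure and must be listed exactly once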
+    assert_equal(label_names.count('Brain-Stem'), 1)
+
+
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_source_space_from_label():
+    """Test generating a source space from volume label
+    """
+    tempdir = _TempDir()
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
+    volume_label = label_names[int(np.random.rand() * len(label_names))]
+
+    # Test pos as dict
+    pos = dict()
+    assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
+                  volume_label=volume_label, mri=aseg_fname)
+
+    # Test no mri provided
+    assert_raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
+                  volume_label=volume_label)
+
+    # Test invalid volume label
+    assert_raises(ValueError, setup_volume_source_space, 'sample',
+                  volume_label='Hello World!', mri=aseg_fname)
+
+    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                    volume_label=volume_label, mri=aseg_fname,
+                                    add_interpolator=False)
+    assert_equal(volume_label, src[0]['seg_name'])
+
+    # test reading and writing
+    out_name = op.join(tempdir, 'temp-src.fif')
+    write_source_spaces(out_name, src)
+    src_from_file = read_source_spaces(out_name)
+    _compare_source_spaces(src, src_from_file, mode='approx')
+
 
+@testing.requires_testing_data
+@requires_freesurfer
+@requires_nibabel()
+def test_combine_source_spaces():
+    """Test combining source spaces
+    """
+    tempdir = _TempDir()
+    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
+    label_names = get_volume_labels_from_aseg(aseg_fname)
+    volume_labels = [label_names[int(np.random.rand() * len(label_names))]
+                     for ii in range(2)]
+
+    # get a surface source space (no need to test creation here)
+    srf = read_source_spaces(fname, patch_stats=False)
+
+    # setup 2 volume source spaces
+    vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                    volume_label=volume_labels[0],
+                                    mri=aseg_fname, add_interpolator=False)
+
+    # setup a discrete source space
+    rr = np.random.randint(0, 20, (100, 3)) * 1e-3
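+    # random positions in meters, with unit normals all pointing along +z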
+    nn = np.zeros(rr.shape)
+    nn[:, -1] = 1
+    pos = {'rr': rr, 'nn': nn}
+    disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
+                                     pos=pos, verbose='error')
+
+    # combine source spaces
+    src = srf + vol + disc
+
+    # test addition of source spaces
+    assert_equal(type(src), SourceSpaces)
+    assert_equal(len(src), 4)
+
+    # test reading and writing
+    src_out_name = op.join(tempdir, 'temp-src.fif')
+    src.save(src_out_name)
+    src_from_file = read_source_spaces(src_out_name)
+    _compare_source_spaces(src, src_from_file, mode='approx')
+
+    # test that all source spaces are in MRI coordinates
+    coord_frames = np.array([s['coord_frame'] for s in src])
+    assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all())
+
+    # test errors for export_volume
+    image_fname = op.join(tempdir, 'temp-image.mgz')
+
+    # source spaces with no volume
+    assert_raises(ValueError, srf.export_volume, image_fname, verbose='error')
+
+    # unrecognized source type
+    disc2 = disc.copy()
+    disc2[0]['type'] = 'kitty'
+    src_unrecognized = src + disc2
+    assert_raises(ValueError, src_unrecognized.export_volume, image_fname,
+                  verbose='error')
+
+    # unrecognized file type
+    bad_image_fname = op.join(tempdir, 'temp-image.png')
+    assert_raises(ValueError, src.export_volume, bad_image_fname,
+                  verbose='error')
+
+    # mixed coordinate frames
+    disc3 = disc.copy()
+    disc3[0]['coord_frame'] = 10
+    src_mixed_coord = src + disc3
+    assert_raises(ValueError, src_mixed_coord.export_volume, image_fname,
+                  verbose='error')
+
+
+@testing.requires_testing_data
+def test_morph_source_spaces():
+    """Test morphing of source spaces
+    """
+    src = read_source_spaces(fname_fs)
+    src_morph = read_source_spaces(fname_morph)
+    src_morph_py = morph_source_spaces(src, 'sample',
+                                       subjects_dir=subjects_dir)
+    _compare_source_spaces(src_morph, src_morph_py, mode='approx')
+
+
+@slow_test
+@testing.requires_testing_data
+def test_morphed_source_space_return():
+    """Test returning a morphed source space to the original subject"""
+    # let's create some random data on fsaverage
+    rng = np.random.RandomState(0)
+    data = rng.randn(20484, 1)
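+    # 20484 = 2 * 10242, the vertex count of fsaverage's ico-5 source space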
+    tmin, tstep = 0, 1.
+    src_fs = read_source_spaces(fname_fs)
+    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
+                            tmin, tstep, 'fsaverage')
+
+    # Create our morph source space
+    src_morph = morph_source_spaces(src_fs, 'sample',
+                                    subjects_dir=subjects_dir)
+
+    # Morph the data over using standard methods
+    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
+                             smooth=1, subjects_dir=subjects_dir)
+
+    # We can now pretend like this was real data we got e.g. from an inverse.
+    # To be complete, let's remove some vertices
+    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
+             for v in stc_morph.vertices]
+    stc_morph = SourceEstimate(
+        np.concatenate([stc_morph.lh_data[keeps[0]],
+                        stc_morph.rh_data[keeps[1]]]),
+        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
+        'sample')
+
+    # Return it to the original subject
+    stc_morph_return = stc_morph.to_original_src(
+        src_fs, subjects_dir=subjects_dir)
+
+    # Compare to the original data
+    stc_morph_morph = stc_morph.morph('fsaverage', stc_morph_return.vertices,
+                                      smooth=1,
+                                      subjects_dir=subjects_dir)
+    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
+    for ii in range(2):
+        assert_array_equal(stc_morph_return.vertices[ii],
+                           stc_morph_morph.vertices[ii])
+    # These will not match perfectly because morphing pushes data around
+    corr = np.corrcoef(stc_morph_return.data[:, 0],
+                       stc_morph_morph.data[:, 0])[0, 1]
+    assert_true(corr > 0.99, corr)
+
+    # Degenerate cases
+    stc_morph.subject = None  # no .subject provided
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
+    stc_morph.subject = 'sample'
+    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subjects_dir=subjects_dir)
+    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
+    assert_raises(ValueError, stc_morph.to_original_src,
+                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
+    src_fs[0]['subject_his_id'] = 'sample'
+    src = read_source_spaces(fname)  # wrong source space
+    assert_raises(RuntimeError, stc_morph.to_original_src,
+                  src, subjects_dir=subjects_dir)
+
+run_tests_if_main()
 
 # The following code was used to generate small-src.fif.gz.
 # Unfortunately the C code bombs when trying to add source space distances,
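
For orientation, the round trip exercised by test_morphed_source_space_return
above can be condensed into a standalone helper. This is a minimal sketch, not
part of the patch: `src_fs` and `subjects_dir` are assumed to come from an
existing FreeSurfer/MNE setup where 'fsaverage' and the target subject are
available.

import numpy as np
from mne import SourceEstimate, morph_source_spaces


def round_trip_corr(src_fs, subjects_dir, subject='sample'):
    """Morph random fsaverage data to subject and back; return corr."""
    rng = np.random.RandomState(0)
    n_verts = sum(len(s['vertno']) for s in src_fs)
    stc_fs = SourceEstimate(rng.randn(n_verts, 1),
                            [s['vertno'] for s in src_fs], 0, 1., 'fsaverage')
    # forward morph: build the subject's morphed source space, then move data
    src_morph = morph_source_spaces(src_fs, subject, subjects_dir=subjects_dir)
    stc_morph = stc_fs.morph(subject, [s['vertno'] for s in src_morph],
                             smooth=1, subjects_dir=subjects_dir)
    # inverse morph: project the data back onto the original source space
    stc_back = stc_morph.to_original_src(src_fs, subjects_dir=subjects_dir)
    # morphing smears data around, so agreement is high but not exact
    stc_ref = stc_morph.morph('fsaverage', stc_back.vertices, smooth=1,
                              subjects_dir=subjects_dir)
    return np.corrcoef(stc_back.data[:, 0], stc_ref.data[:, 0])[0, 1]
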
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
index 326d444..a7e0c1d 100644
--- a/mne/tests/test_surface.py
+++ b/mne/tests/test_surface.py
@@ -1,25 +1,28 @@
 from __future__ import print_function
+import os
 import os.path as op
 import numpy as np
+import warnings
+from shutil import copyfile
+from scipy import sparse
 from nose.tools import assert_true, assert_raises
-from numpy.testing import (assert_array_equal, assert_array_almost_equal,
-                           assert_allclose, assert_equal)
-
-from mne.datasets import sample
-from mne import (read_bem_surfaces, write_bem_surface, read_surface,
-                 write_surface, decimate_surface)
-from mne.surface import (_make_morph_map, read_morph_map, _compute_nearest,
-                         fast_cross_3d, get_head_surf,
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
+
+from mne.datasets import testing
+from mne import read_surface, write_surface, decimate_surface
+from mne.surface import (read_morph_map, _compute_nearest,
+                         fast_cross_3d, get_head_surf, read_curvature,
                          get_meg_helmet_surf)
-from mne.utils import _TempDir, requires_tvtk
+from mne.utils import _TempDir, requires_tvtk, run_tests_if_main, slow_test
 from mne.io import read_info
-from mne.transforms import _get_mri_head_t_from_trans_file
+from mne.transforms import _get_mri_head_t
 
-data_path = sample.data_path(download=False)
+data_path = testing.data_path(download=False)
 subjects_dir = op.join(data_path, 'subjects')
 fname = op.join(subjects_dir, 'sample', 'bem',
-                'sample-5120-5120-5120-bem-sol.fif')
-tempdir = _TempDir()
+                'sample-1280-1280-1280-bem-sol.fif')
+
+warnings.simplefilter('always')
 
 
 def test_helmet():
@@ -34,14 +37,14 @@ def test_helmet():
     fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
     fname_trans = op.join(base_dir, 'tests', 'data',
                           'sample-audvis-raw-trans.txt')
-    trans = _get_mri_head_t_from_trans_file(fname_trans)
+    trans = _get_mri_head_t(fname_trans)[0]
     for fname in [fname_raw, fname_kit_raw, fname_bti_raw, fname_ctf_raw]:
         helmet = get_meg_helmet_surf(read_info(fname), trans)
         assert_equal(len(helmet['rr']), 304)  # they all have 304 verts
         assert_equal(len(helmet['rr']), len(helmet['nn']))
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_head():
     """Test loading the head surface
     """
@@ -82,45 +85,64 @@ def test_compute_nearest():
         assert_array_equal(nn1, nn2)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_make_morph_maps():
     """Test reading and creating morph maps
     """
-    mmap = read_morph_map('fsaverage', 'sample', subjects_dir=subjects_dir)
-    mmap2 = _make_morph_map('fsaverage', 'sample', subjects_dir=subjects_dir)
+    # make a new fake subjects_dir
+    tempdir = _TempDir()
+    for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
+        os.mkdir(op.join(tempdir, subject))
+        os.mkdir(op.join(tempdir, subject, 'surf'))
+        for hemi in ['lh', 'rh']:
+            args = [subject, 'surf', hemi + '.sphere.reg']
+            copyfile(op.join(subjects_dir, *args),
+                     op.join(tempdir, *args))
+
+    # this should trigger the creation of morph-maps dir and create the map
+    mmap = read_morph_map('fsaverage_ds', 'sample_ds', tempdir)
+    mmap2 = read_morph_map('fsaverage_ds', 'sample_ds', subjects_dir)
     assert_equal(len(mmap), len(mmap2))
     for m1, m2 in zip(mmap, mmap2):
         # deal with sparse matrix stuff
         diff = (m1 - m2).data
         assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
 
+    # This will also trigger creation, but it's trivial
+    mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
+    for mm in mmap:
+        assert_true((mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0)
 
-@sample.requires_sample_data
-def test_io_bem_surfaces():
-    """Test reading of bem surfaces
-    """
-    surf = read_bem_surfaces(fname, add_geom=True)
-    surf = read_bem_surfaces(fname, add_geom=False)
-    print("Number of surfaces : %d" % len(surf))
-
-    write_bem_surface(op.join(tempdir, 'bem_surf.fif'), surf[0])
-    surf_read = read_bem_surfaces(op.join(tempdir, 'bem_surf.fif'),
-                                  add_geom=False)
-
-    for key in surf[0].keys():
-        assert_array_almost_equal(surf[0][key], surf_read[0][key])
 
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_io_surface():
     """Test reading and writing of Freesurfer surface mesh files
     """
-    fname = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.inflated')
-    pts, tri = read_surface(fname)
-    write_surface(op.join(tempdir, 'tmp'), pts, tri)
-    c_pts, c_tri = read_surface(op.join(tempdir, 'tmp'))
-    assert_array_equal(pts, c_pts)
-    assert_array_equal(tri, c_tri)
+    tempdir = _TempDir()
+    fname_quad = op.join(data_path, 'subjects', 'bert', 'surf',
+                         'lh.inflated.nofix')
+    fname_tri = op.join(data_path, 'subjects', 'fsaverage', 'surf',
+                        'lh.inflated')
+    for fname in (fname_quad, fname_tri):
+        pts, tri = read_surface(fname)
+        write_surface(op.join(tempdir, 'tmp'), pts, tri)
+        c_pts, c_tri = read_surface(op.join(tempdir, 'tmp'))
+        assert_array_equal(pts, c_pts)
+        assert_array_equal(tri, c_tri)
+
+
+@testing.requires_testing_data
+def test_read_curv():
+    """Test reading curvature data
+    """
+    fname_curv = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.curv')
+    fname_surf = op.join(data_path, 'subjects', 'fsaverage', 'surf',
+                         'lh.inflated')
+    bin_curv = read_curvature(fname_curv)
+    rr = read_surface(fname_surf)[0]
+    assert_true(len(bin_curv) == len(rr))
+    assert_true(np.logical_or(bin_curv == 0, bin_curv == 1).all())
 
 
 @requires_tvtk
@@ -138,3 +160,6 @@ def test_decimate_surface():
     nirvana = 5
     tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])
     assert_raises(ValueError, decimate_surface, points, tris, n_tri)
+
+
+run_tests_if_main()
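
The surface I/O round trip that test_io_surface pins down amounts to the
sketch below; `fname` is assumed to point at any readable FreeSurfer surface
file (e.g. lh.inflated), and _TempDir is the same auto-cleaning helper the
tests use.

import os.path as op
import numpy as np
from mne import read_surface, write_surface
from mne.utils import _TempDir


def surface_round_trip(fname):
    """Read a surface, write it back out, and verify nothing changed."""
    pts, tri = read_surface(fname)  # vertices (n, 3) and triangles (m, 3)
    tempdir = _TempDir()  # cleaned up when the object goes out of scope
    out = op.join(tempdir, 'tmp-surf')
    write_surface(out, pts, tri)
    pts2, tri2 = read_surface(out)
    np.testing.assert_array_equal(pts, pts2)
    np.testing.assert_array_equal(tri, tri2)
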
diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py
index a7cdca1..605f589 100644
--- a/mne/tests/test_transforms.py
+++ b/mne/tests/test_transforms.py
@@ -1,45 +1,55 @@
-from math import pi
+import os
 import os.path as op
+import numpy as np
 
 from nose.tools import assert_true, assert_raises
-from numpy.testing import assert_array_equal, assert_equal, assert_allclose
+from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
+                           assert_almost_equal, assert_array_almost_equal)
 import warnings
 
-from mne.datasets import sample
+from mne.io.constants import FIFF
+from mne.datasets import testing
 from mne import read_trans, write_trans
-from mne.utils import _TempDir
-from mne.transforms import (_get_mri_head_t_from_trans_file, invert_transform,
-                            rotation, rotation3d, rotation_angles)
+from mne.utils import _TempDir, run_tests_if_main
+from mne.transforms import (invert_transform, _get_mri_head_t,
+                            rotation, rotation3d, rotation_angles, _find_trans,
+                            combine_transforms, transform_coordinates,
+                            collect_transforms, apply_trans, translation,
+                            get_ras_to_neuromag_trans, _sphere_to_cartesian,
+                            _polar_to_cartesian, _cartesian_to_sphere)
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_path = sample.data_path(download=False)
-fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-trans.fif')
-fname_eve = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
+data_path = testing.data_path(download=False)
+fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
+fname_eve = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc_raw-eve.fif')
 fname_trans = op.join(op.split(__file__)[0], '..', 'io', 'tests',
                       'data', 'sample-audvis-raw-trans.txt')
 
-tempdir = _TempDir()
 
-
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_get_mri_head_t():
     """Test converting '-trans.txt' to '-trans.fif'"""
     trans = read_trans(fname)
     trans = invert_transform(trans)  # starts out as head->MRI, so invert
-    trans_2 = _get_mri_head_t_from_trans_file(fname_trans)
+    trans_2 = _get_mri_head_t(fname_trans)[0]
     assert_equal(trans['from'], trans_2['from'])
     assert_equal(trans['to'], trans_2['to'])
     assert_allclose(trans['trans'], trans_2['trans'], rtol=1e-5, atol=1e-5)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_io_trans():
     """Test reading and writing of trans files
     """
+    tempdir = _TempDir()
+    os.mkdir(op.join(tempdir, 'sample'))
+    assert_raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
     trans0 = read_trans(fname)
-    fname1 = op.join(tempdir, 'test-trans.fif')
+    fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
     write_trans(fname1, trans0)
+    assert_true(fname1 == _find_trans('sample', subjects_dir=tempdir))
     trans1 = read_trans(fname1)
 
     # check all properties
@@ -49,7 +59,7 @@ def test_io_trans():
 
     # check reading non -trans.fif files
     assert_raises(IOError, read_trans, fname_eve)
-    
+
     # check warning on bad filenames
     with warnings.catch_warnings(record=True) as w:
         fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
@@ -57,9 +67,74 @@ def test_io_trans():
     assert_true(len(w) >= 1)
 
 
+def test_get_ras_to_neuromag_trans():
+    """Test the coordinate transformation from ras to neuromag"""
+    # create model points in neuromag-like space
+    anterior = [0, 1, 0]
+    left = [-1, 0, 0]
+    right = [.8, 0, 0]
+    up = [0, 0, 1]
+    rand_pts = np.random.uniform(-1, 1, (3, 3))
+    pts = np.vstack((anterior, left, right, up, rand_pts))
+
+    # change coord system
+    rx, ry, rz, tx, ty, tz = np.random.uniform(-2 * np.pi, 2 * np.pi, 6)
+    trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+    pts_changed = apply_trans(trans, pts)
+
+    # transform back into original space
+    nas, lpa, rpa = pts_changed[:3]
+    hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
+    pts_restored = apply_trans(hsp_trans, pts_changed)
+
+    err = "Neuromag transformation failed"
+    assert_array_almost_equal(pts_restored, pts, 6, err)
+
+
+def test_sphere_to_cartesian():
+    """Test helper transform function from sphere to cartesian"""
+    phi, theta, r = (np.pi, np.pi, 1)
+    # expected value is (1, 0, 0)
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    coord = _sphere_to_cartesian(phi, theta, r)
+    # np.pi is an approx since pi is irrational
+    assert_almost_equal(coord, (x, y, z), 10)
+    assert_almost_equal(coord, (1, 0, 0), 10)
+
+
+def test_polar_to_cartesian():
+    """Test helper transform function from polar to cartesian"""
+    r = 1
+    theta = np.pi
+    # expected values are (-1, 0)
+    x = r * np.cos(theta)
+    y = r * np.sin(theta)
+    coord = _polar_to_cartesian(theta, r)
+    # np.pi is an approx since pi is irrational
+    assert_almost_equal(coord, (x, y), 10)
+    assert_almost_equal(coord, (-1, 0), 10)
+
+
+def test_cartesian_to_sphere():
+    """Test helper transform function from cartesian to sphere"""
+    x, y, z = (1, 0, 0)
+    # expected values are (0, 0, 1)
+    hypotxy = np.hypot(x, y)
+    r = np.hypot(hypotxy, z)
+    elev = np.arctan2(z, hypotxy)
+    az = np.arctan2(y, x)
+    coord = _cartesian_to_sphere(x, y, z)
+    assert_equal(coord, (az, elev, r))
+    assert_equal(coord, (0, 0, 1))
+
+
 def test_rotation():
-    """Test conversion between rotation angles and transformation matrix"""
-    tests = [(0, 0, 1), (.5, .5, .5), (pi, 0, -1.5)]
+    """Test conversion between rotation angles and transformation matrix
+    """
+    tests = [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]
     for rot in tests:
         x, y, z = rot
         m = rotation3d(x, y, z)
@@ -69,3 +144,55 @@ def test_rotation():
         assert_equal(back, rot)
         back4 = rotation_angles(m4)
         assert_equal(back4, rot)
+
+
+@testing.requires_testing_data
+def test_combine():
+    """Test combining transforms
+    """
+    trans = read_trans(fname)
+    inv = invert_transform(trans)
+    combine_transforms(trans, inv, trans['from'], trans['from'])
+    assert_raises(RuntimeError, combine_transforms, trans, inv,
+                  trans['to'], trans['from'])
+    assert_raises(RuntimeError, combine_transforms, trans, inv,
+                  trans['from'], trans['to'])
+    assert_raises(RuntimeError, combine_transforms, trans, trans,
+                  trans['from'], trans['to'])
+
+
+@testing.requires_testing_data
+def test_transform_coords():
+    """Test transforming coordinates
+    """
+    # normal trans won't work
+    with warnings.catch_warnings(record=True):  # dep
+        assert_raises(ValueError, transform_coordinates,
+                      fname, np.eye(3), 'meg', 'fs_tal')
+    # needs to have all entries
+    pairs = [[FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD],
+             [FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_MNE_COORD_RAS],
+             [FIFF.FIFFV_MNE_COORD_RAS, FIFF.FIFFV_MNE_COORD_MNI_TAL],
+             [FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ],
+             [FIFF.FIFFV_MNE_COORD_MNI_TAL, FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ],
+             ]
+    xforms = []
+    for fro, to in pairs:
+        xforms.append({'to': to, 'from': fro, 'trans': np.eye(4)})
+    tempdir = _TempDir()
+    all_fname = op.join(tempdir, 'all-trans.fif')
+    with warnings.catch_warnings(record=True):  # dep
+        collect_transforms(all_fname, xforms)
+    for fro in ['meg', 'mri']:
+        for to in ['meg', 'mri', 'fs_tal', 'mni_tal']:
+            with warnings.catch_warnings(record=True):  # dep
+                out = transform_coordinates(all_fname, np.eye(3), fro, to)
+                assert_allclose(out, np.eye(3))
+    with warnings.catch_warnings(record=True):  # dep
+        assert_raises(ValueError, transform_coordinates, all_fname, np.eye(4),
+                      'meg', 'meg')
+        assert_raises(ValueError, transform_coordinates, all_fname, np.eye(3),
+                      'fs_tal', 'meg')
+
+
+run_tests_if_main()
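
The spherical/Cartesian helpers covered above are private
(_sphere_to_cartesian and friends), but the math they implement is small
enough to restate in plain NumPy. The functions below only mirror the
behavior asserted in the tests; they are an illustration, not the library
API.

import numpy as np


def sphere_to_cartesian(phi, theta, r):
    """Elevation phi, azimuth theta, radius r -> (x, y, z)."""
    z = r * np.sin(phi)
    rcos_phi = r * np.cos(phi)
    return rcos_phi * np.cos(theta), rcos_phi * np.sin(theta), z


def cartesian_to_sphere(x, y, z):
    """(x, y, z) -> (azimuth, elevation, radius)."""
    hypotxy = np.hypot(x, y)
    return np.arctan2(y, x), np.arctan2(z, hypotxy), np.hypot(hypotxy, z)
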
diff --git a/mne/tests/test_utils.py b/mne/tests/test_utils.py
index 25b587b..5dcb7e4 100644
--- a/mne/tests/test_utils.py
+++ b/mne/tests/test_utils.py
@@ -1,20 +1,26 @@
-from numpy.testing import assert_equal, assert_array_equal
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
 from nose.tools import assert_true, assert_raises, assert_not_equal
 from copy import deepcopy
 import os.path as op
 import numpy as np
+from scipy import sparse
 import os
 import warnings
-from mne.externals.six.moves import urllib
 
 from mne.utils import (set_log_level, set_log_file, _TempDir,
                        get_config, set_config, deprecated, _fetch_file,
-                       sum_squared, requires_mem_gb, estimate_rank,
-                       _url_to_local_path, sizeof_fmt,
+                       sum_squared, estimate_rank,
+                       _url_to_local_path, sizeof_fmt, _check_subject,
                        _check_type_picks, object_hash, object_diff,
-                       requires_good_network)
+                       requires_good_network, run_tests_if_main, md5sum,
+                       ArgvSetter, _memory_usage, check_random_state,
+                       _check_mayavi_version, requires_mayavi,
+                       set_memmap_min_size, _get_stim_channel, _check_fname,
+                       create_slices, _time_mask, random_permutation,
+                       _get_call_line, compute_corr, verbose)
 from mne.io import show_fiff
 from mne import Evoked
+from mne.externals.six.moves import StringIO
 
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
@@ -24,62 +30,200 @@ fname_evoked = op.join(base_dir, 'test-ave.fif')
 fname_raw = op.join(base_dir, 'test_raw.fif')
 fname_log = op.join(base_dir, 'test-ave.log')
 fname_log_2 = op.join(base_dir, 'test-ave-2.log')
-tempdir = _TempDir()
-test_name = op.join(tempdir, 'test.log')
 
 
-def clean_lines(lines):
+def clean_lines(lines=[]):
     # Function to scrub filenames for checking logging output (in test_logging)
     return [l if 'Reading ' not in l else 'Reading test file' for l in lines]
 
 
+def test_get_call_line():
+    """Test getting a call line
+    """
+    @verbose
+    def foo(verbose=None):
+        return _get_call_line(in_verbose=True)
+
+    for v in (None, True):
+        my_line = foo(verbose=v)  # testing
+        assert_equal(my_line, 'my_line = foo(verbose=v)  # testing')
+
+    def bar():
+        return _get_call_line(in_verbose=False)
+
+    my_line = bar()  # testing more
+    assert_equal(my_line, 'my_line = bar()  # testing more')
+
+
+def test_misc():
+    """Test misc utilities"""
+    assert_equal(_memory_usage(-1)[0], -1)
+    assert_equal(_memory_usage((clean_lines, [], {}))[0], -1)
+    assert_equal(_memory_usage(clean_lines)[0], -1)
+    assert_raises(ValueError, check_random_state, 'foo')
+    assert_raises(ValueError, set_memmap_min_size, 1)
+    assert_raises(ValueError, set_memmap_min_size, 'foo')
+    assert_raises(TypeError, get_config, 1)
+    assert_raises(TypeError, set_config, 1)
+    assert_raises(TypeError, set_config, 'foo', 1)
+    assert_raises(TypeError, _get_stim_channel, 1, None)
+    assert_raises(TypeError, _get_stim_channel, [1], None)
+    assert_raises(TypeError, _check_fname, 1)
+    assert_raises(ValueError, _check_subject, None, None)
+    assert_raises(ValueError, _check_subject, None, 1)
+    assert_raises(ValueError, _check_subject, 1, None)
+
+
+@requires_mayavi
+def test_check_mayavi():
+    """Test mayavi version check"""
+    assert_raises(RuntimeError, _check_mayavi_version, '100.0.0')
+
+
+def test_run_tests_if_main():
+    """Test run_tests_if_main functionality"""
+    x = []
+
+    def test_a():
+        x.append(True)
+
+    @np.testing.dec.skipif(True)
+    def test_b():
+        return
+
+    try:
+        __name__ = '__main__'
+        run_tests_if_main(measure_mem=False)  # dual meas causes problems
+
+        def test_c():
+            raise RuntimeError
+
+        try:
+            __name__ = '__main__'
+            run_tests_if_main(measure_mem=False)  # dual meas causes problems
+        except RuntimeError:
+            pass
+        else:
+            raise RuntimeError('Error not raised')
+    finally:
+        del __name__
+    assert_true(len(x) == 2)
+    assert_true(x[0] and x[1])
+
+
 def test_hash():
     """Test dictionary hashing and comparison functions"""
     # does hashing all of these types work:
     # {dict, list, tuple, ndarray, str, float, int, None}
-    d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3))
+    d0 = dict(a=dict(a=0.1, b='fo', c=1), b=[1, 'b'], c=(), d=np.ones(3),
+              e=None)
     d0[1] = None
     d0[2.] = b'123'
 
     d1 = deepcopy(d0)
-    print(object_diff(d0, d1))
+    assert_true(len(object_diff(d0, d1)) == 0)
+    assert_true(len(object_diff(d1, d0)) == 0)
     assert_equal(object_hash(d0), object_hash(d1))
 
     # change values slightly
     d1['data'] = np.ones(3, int)
+    d1['d'][0] = 0
     assert_not_equal(object_hash(d0), object_hash(d1))
 
     d1 = deepcopy(d0)
-    print(object_diff(d0, d1))
     assert_equal(object_hash(d0), object_hash(d1))
     d1['a']['a'] = 0.11
-    object_diff(d0, d1)
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['a']['d'] = 0  # non-existent key
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
     assert_not_equal(object_hash(d0), object_hash(d1))
 
     d1 = deepcopy(d0)
-    print(object_diff(d0, d1))
     assert_equal(object_hash(d0), object_hash(d1))
+    d1['b'].append(0)  # different-length lists
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    assert_equal(object_hash(d0), object_hash(d1))
+    d1['e'] = 'foo'  # non-None
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+    assert_not_equal(object_hash(d0), object_hash(d1))
+
+    d1 = deepcopy(d0)
+    d2 = deepcopy(d0)
+    d1['e'] = StringIO()
+    d2['e'] = StringIO()
+    d2['e'].write('foo')
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
+
+    d1 = deepcopy(d0)
     d1[1] = 2
-    object_diff(d0, d1)
+    assert_true(len(object_diff(d0, d1)) > 0)
+    assert_true(len(object_diff(d1, d0)) > 0)
     assert_not_equal(object_hash(d0), object_hash(d1))
+
     # generators (and other types) not supported
+    d1 = deepcopy(d0)
+    d2 = deepcopy(d0)
     d1[1] = (x for x in d0)
+    d2[1] = (x for x in d0)
+    assert_raises(RuntimeError, object_diff, d1, d2)
     assert_raises(RuntimeError, object_hash, d1)
 
+    x = sparse.eye(2, 2, format='csc')
+    y = sparse.eye(2, 2, format='csr')
+    assert_true('type mismatch' in object_diff(x, y))
+    y = sparse.eye(2, 2, format='csc')
+    assert_equal(len(object_diff(x, y)), 0)
+    y[1, 1] = 2
+    assert_true('elements' in object_diff(x, y))
+    y = sparse.eye(3, 3, format='csc')
+    assert_true('shape' in object_diff(x, y))
+    y = 0
+    assert_true('type mismatch' in object_diff(x, y))
+
+
+def test_md5sum():
+    """Test md5sum calculation
+    """
+    tempdir = _TempDir()
+    fname1 = op.join(tempdir, 'foo')
+    fname2 = op.join(tempdir, 'bar')
+    with open(fname1, 'wb') as fid:
+        fid.write(b'abcd')
+    with open(fname2, 'wb') as fid:
+        fid.write(b'efgh')
+    assert_equal(md5sum(fname1), md5sum(fname1, 1))
+    assert_equal(md5sum(fname2), md5sum(fname2, 1024))
+    assert_true(md5sum(fname1) != md5sum(fname2))
+
 
 def test_tempdir():
     """Test TempDir
     """
     tempdir2 = _TempDir()
     assert_true(op.isdir(tempdir2))
-    tempdir2.cleanup()
-    assert_true(not op.isdir(tempdir2))
+    x = str(tempdir2)
+    del tempdir2
+    assert_true(not op.isdir(x))
 
 
 def test_estimate_rank():
     """Test rank estimation
     """
     data = np.eye(10)
+    assert_array_equal(estimate_rank(data, return_singular=True)[1],
+                       np.ones(10))
     data[0, 0] = 0
     assert_equal(estimate_rank(data), 9)
 
@@ -87,6 +231,9 @@ def test_estimate_rank():
 def test_logging():
     """Test logging (to file)
     """
+    assert_raises(ValueError, set_log_level, 'foo')
+    tempdir = _TempDir()
+    test_name = op.join(tempdir, 'test.log')
     with open(fname_log, 'r') as old_log_file:
         old_lines = clean_lines(old_log_file.readlines())
     with open(fname_log_2, 'r') as old_log_file_2:
@@ -158,6 +305,7 @@ def test_logging():
 
 def test_config():
     """Test mne-python config file support"""
+    tempdir = _TempDir()
     key = '_MNE_PYTHON_CONFIG_TESTING'
     value = '123456'
     old_val = os.getenv(key, None)
@@ -195,7 +343,7 @@ def test_show_fiff():
     keys = ['FIFF_EPOCH', 'FIFFB_HPI_COIL', 'FIFFB_PROJ_ITEM',
             'FIFFB_PROCESSED_DATA', 'FIFFB_EVOKED', 'FIFF_NAVE',
             'FIFF_EPOCH']
-    assert_true(all([key in info for key in keys]))
+    assert_true(all(key in info for key in keys))
     info = show_fiff(fname_raw, read_limit=1024)
 
 
@@ -206,20 +354,11 @@ def deprecated_func():
 
 @deprecated('message')
 class deprecated_class(object):
+
     def __init__(self):
         pass
 
 
-@requires_mem_gb(10000)
-def big_mem_func():
-    pass
-
-
-@requires_mem_gb(0)
-def no_mem_func():
-    pass
-
-
 def test_deprecated():
     """Test deprecated function
     """
@@ -233,52 +372,28 @@ def test_deprecated():
     assert_true(len(w) == 1)
 
 
-def test_requires_mem_gb():
-    """Test requires memory function
-    """
-    try:
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always')
-            big_mem_func()
-        assert_true(len(w) == 1)
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter('always')
-            no_mem_func()
-        assert_true(len(w) == 0)
-    except:
-        try:
-            import psutil
-            msg = ('psutil version %s exposes unexpected API' %
-                   psutil.__version__)
-        except ImportError:
-            msg = 'Could not import psutil'
-        from nose.plugins.skip import SkipTest
-        SkipTest(msg)
-
-
 @requires_good_network
 def test_fetch_file():
     """Test file downloading
     """
-    # Skipping test if no internet connection available
-    try:
-        urllib.request.urlopen("http://github.com", timeout=2)
-    except:
-        from nose.plugins.skip import SkipTest
-        raise SkipTest('No internet connection, skipping download test.')
-
-    urls = ['http://github.com/mne-tools/mne-python/blob/master/README.rst',
+    tempdir = _TempDir()
+    urls = ['http://martinos.org/mne/',
             'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt']
-    for url in urls:
-        archive_name = op.join(tempdir, "download_test")
-        _fetch_file(url, archive_name, print_destination=False)
-        assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
-                      op.join(tempdir, 'test'))
-        resume_name = op.join(tempdir, "download_resume")
-        # touch file
-        with open(resume_name + '.part', 'w'):
-            os.utime(resume_name + '.part', None)
-        _fetch_file(url, resume_name, print_destination=False, resume=True)
+    with ArgvSetter(disable_stderr=False):  # to capture stdout
+        for url in urls:
+            archive_name = op.join(tempdir, "download_test")
+            _fetch_file(url, archive_name, verbose=False)
+            assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
+                          op.join(tempdir, 'test'), verbose=False)
+            resume_name = op.join(tempdir, "download_resume")
+            # touch file
+            with open(resume_name + '.part', 'w'):
+                os.utime(resume_name + '.part', None)
+            _fetch_file(url, resume_name, resume=True, verbose=False)
+            assert_raises(ValueError, _fetch_file, url, archive_name,
+                          hash_='a', verbose=False)
+            assert_raises(RuntimeError, _fetch_file, url, archive_name,
+                          hash_='a' * 32, verbose=False)
 
 
 def test_sum_squared():
@@ -316,3 +431,86 @@ def test_check_type_picks():
     assert_raises(ValueError, _check_type_picks, picks)
     picks = 'b'
     assert_raises(ValueError, _check_type_picks, picks)
+
+
+def test_compute_corr():
+    """Test Anscombe's Quartett
+    """
+    x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
+    y = np.array([[8.04, 6.95, 7.58, 8.81, 8.33, 9.96,
+                   7.24, 4.26, 10.84, 4.82, 5.68],
+                  [9.14, 8.14, 8.74, 8.77, 9.26, 8.10,
+                   6.13, 3.10, 9.13, 7.26, 4.74],
+                  [7.46, 6.77, 12.74, 7.11, 7.81, 8.84,
+                   6.08, 5.39, 8.15, 6.42, 5.73],
+                  [8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8],
+                  [6.58, 5.76, 7.71, 8.84, 8.47, 7.04,
+                   5.25, 12.50, 5.56, 7.91, 6.89]])
+
+    r = compute_corr(x, y.T)
+    r2 = np.array([np.corrcoef(x, y[i])[0, 1]
+                   for i in range(len(y))])
+    assert_allclose(r, r2)
+    assert_raises(ValueError, compute_corr, [1, 2], [])
+
+
+def test_create_slices():
+    """Test checking the create of time create_slices
+    """
+    # Test that create_slices default provide an empty list
+    assert_true(create_slices(0, 0) == [])
+    # Test that create_slice return correct number of slices
+    assert_true(len(create_slices(0, 100)) == 100)
+    # Test with non-zero start parameters
+    assert_true(len(create_slices(50, 100)) == 50)
+    # Test slices' length with non-zero start and window_width=2
+    assert_true(len(create_slices(0, 100, length=2)) == 50)
+    # Test slices' length with manual slice separation
+    assert_true(len(create_slices(0, 100, step=10)) == 10)
+    # Test slices' within length for non-consecutive samples
+    assert_true(len(create_slices(0, 500, length=50, step=10)) == 46)
+    # Test that slices elements start, stop and step correctly
+    slices = create_slices(0, 10)
+    assert_true(slices[0].start == 0)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 1)
+    assert_true(slices[-1].stop == 10)
+    # Same with larger window width
+    slices = create_slices(0, 9, length=3)
+    assert_true(slices[0].start == 0)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 3)
+    assert_true(slices[-1].stop == 9)
+    # Same with manual slices' separation
+    slices = create_slices(0, 9, length=3, step=1)
+    assert_true(len(slices) == 7)
+    assert_true(slices[0].step == 1)
+    assert_true(slices[0].stop == 3)
+    assert_true(slices[-1].start == 6)
+    assert_true(slices[-1].stop == 9)
+
+
+def test_time_mask():
+    """Test safe time masking
+    """
+    N = 10
+    x = np.arange(N).astype(float)
+    assert_equal(_time_mask(x, 0, N - 1).sum(), N)
+    assert_equal(_time_mask(x - 1e-10, 0, N - 1).sum(), N)
+    assert_equal(_time_mask(x - 1e-10, 0, N - 1, strict=True).sum(), N - 1)
+
+
+def test_random_permutation():
+    """Test random permutation function
+    """
+    n_samples = 10
+    random_state = 42
+    python_randperm = random_permutation(n_samples, random_state)
+
+    # matlab output when we execute rng(42), randperm(10)
+    matlab_randperm = np.array([7, 6, 5, 1, 4, 9, 10, 3, 8, 2])
+
+    assert_array_equal(python_randperm, matlab_randperm - 1)
+
+
+run_tests_if_main()
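
As a quick reference, the create_slices behavior pinned down above can be
summarized in a few calls; the commented values follow directly from the
assertions in test_create_slices.

from mne.utils import create_slices

slices = create_slices(0, 9, length=3, step=1)  # overlapping windows
print(len(slices))    # 7
print(slices[0])      # slice(0, 3, 1)
print(slices[-1])     # slice(6, 9, 1)
print(create_slices(0, 0))                  # []
print(len(create_slices(0, 100)))           # 100 unit-width slices
print(len(create_slices(0, 100, step=10)))  # 10 non-overlapping slices
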
diff --git a/mne/time_frequency/__init__.py b/mne/time_frequency/__init__.py
index 4e8837c..14c92fb 100644
--- a/mne/time_frequency/__init__.py
+++ b/mne/time_frequency/__init__.py
@@ -1,10 +1,11 @@
 """Time frequency analysis tools
 """
 
-from .tfr import induced_power, single_trial_power, morlet, tfr_morlet
-from .tfr import AverageTFR
+from .tfr import (single_trial_power, morlet, tfr_morlet, cwt_morlet,
+                  AverageTFR, tfr_multitaper, read_tfrs, write_tfrs)
 from .psd import compute_raw_psd, compute_epochs_psd
 from .csd import CrossSpectralDensity, compute_epochs_csd
-from .ar import yule_walker, ar_raw, iir_filter_raw
+from .ar import fit_iir_model_raw
 from .multitaper import dpss_windows, multitaper_psd
 from .stft import stft, istft, stftfreq
+from ._stockwell import tfr_stockwell
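
After this reorganization the public time-frequency entry points import
directly from mne.time_frequency; note that induced_power, yule_walker,
ar_raw and iir_filter_raw are no longer exported (fit_iir_model_raw replaces
the AR/IIR helpers). A quick smoke test of the new namespace:

from mne.time_frequency import (tfr_morlet, tfr_multitaper, tfr_stockwell,
                                fit_iir_model_raw, read_tfrs, write_tfrs)
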
diff --git a/mne/time_frequency/_stockwell.py b/mne/time_frequency/_stockwell.py
new file mode 100644
index 0000000..a9f703e
--- /dev/null
+++ b/mne/time_frequency/_stockwell.py
@@ -0,0 +1,255 @@
+# Authors : Denis A. Engemann <denis.engemann at gmail.com>
+#           Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License : BSD 3-clause
+
+from copy import deepcopy
+import math
+import numpy as np
+from scipy import fftpack
+# XXX explore CUDA optimization at some point.
+
+from ..io.pick import pick_types, pick_info
+from ..utils import logger, verbose
+from ..parallel import parallel_func, check_n_jobs
+from .tfr import AverageTFR, _get_data
+
+
+def _check_input_st(x_in, n_fft):
+    """Aux function"""
+    # operate along the last (time) axis; check/adjust n_fft below
+    n_times = x_in.shape[-1]
+
+    def _is_power_of_two(n):
+        return not (n > 0 and ((n & (n - 1))))
+
+    if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
+        # Compute next power of 2
+        n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
+    elif n_fft < n_times:
+        raise ValueError("n_fft cannot be smaller than signal size. "
+                         "Got %s < %s." % (n_fft, n_times))
+    zero_pad = 0
+    if n_times < n_fft:
+        msg = ('The input signal is shorter ({0}) than "n_fft" ({1}). '
+               'Applying zero padding.').format(x_in.shape[-1], n_fft)
+        logger.warning(msg)
+        zero_pad = n_fft - n_times
+        pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
+        x_in = np.concatenate((x_in, pad_array), axis=-1)
+    # return unconditionally: the no-padding case must not fall through
+    # and implicitly return None
+    return x_in, n_fft, zero_pad
+
+
+def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
+    """Precompute stockwell gausian windows (in the freq domain)"""
+    tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
+    tw = np.r_[tw[:1], tw[1:][::-1]]
+
+    k = width  # 1 for the classical Stockwell transform
+    f_range = np.arange(start_f, stop_f, 1)
+    windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
+    for i_f, f in enumerate(f_range):
+        if f == 0.:
+            window = np.ones(len(tw))
+        else:
+            window = ((f / (np.sqrt(2. * np.pi) * k)) *
+                      np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
+        window /= window.sum()  # normalisation
+        windows[i_f] = fftpack.fft(window)
+    return windows
+
+
+def _st(x, start_f, windows):
+    """Implementation based on Ali Moukadem Matlab code (only used in tests)"""
+    n_samp = x.shape[-1]
+    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
+    # do the work
+    Fx = fftpack.fft(x)
+    XF = np.concatenate([Fx, Fx], axis=-1)
+    for i_f, window in enumerate(windows):
+        f = start_f + i_f
+        ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
+    return ST
+
+
+def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
+    """Aux function"""
+    n_samp = x.shape[-1]
+    n_out = (n_samp - zero_pad)
+    n_out = n_out // decim + bool(n_out % decim)
+    psd = np.empty((len(W), n_out))
+    itc = np.empty_like(psd) if compute_itc else None
+    X = fftpack.fft(x)
+    XX = np.concatenate([X, X], axis=-1)
+    for i_f, window in enumerate(W):
+        f = start_f + i_f
+        ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
+        # guard against an empty slice when no zero padding was applied
+        TFR = ST[:, :-zero_pad:decim] if zero_pad > 0 else ST[:, ::decim]
+        TFR_abs = np.abs(TFR)
+        if compute_itc:
+            TFR /= TFR_abs
+            itc[i_f] = np.abs(np.mean(TFR, axis=0))
+        TFR_abs *= TFR_abs
+        psd[i_f] = np.mean(TFR_abs, axis=0)
+    return psd, itc
+
+
+def _induced_power_stockwell(data, sfreq, fmin, fmax, n_fft=None, width=1.0,
+                             decim=1, return_itc=False, n_jobs=1):
+    """Computes power and intertrial coherence using Stockwell (S) transform
+
+    Parameters
+    ----------
+    data : ndarray
+        The signal to transform. Any dimensionality supported as long
+        as the last dimension is time.
+    sfreq : float
+        The sampling frequency.
+    fmin : None, float
+        The minimum frequency to include. If None defaults to the minimum fft
+        frequency greater than zero.
+    fmax : None, float
+        The maximum frequency to include. If None defaults to the maximum fft.
+    n_fft : int | None
+        The length of the windows used for FFT. If None, it defaults to the
+        next power of 2 larger than the signal length.
+    width : float
+        The width of the Gaussian window. If < 1, increased temporal
+        resolution, if > 1, increased frequency resolution. Defaults to 1.
+        (classical S-Transform).
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+    n_jobs : int
+        Number of parallel jobs to use.
+
+    Returns
+    -------
+    st_power : ndarray
+        The multitaper power of the Stockwell transformed data.
+        The last two dimensions are frequency and time.
+    itc : ndarray
+        The intertrial coherence. Only returned if return_itc is True.
+    freqs : ndarray
+        The frequencies.
+
+    References
+    ----------
+    Stockwell, R. G. "Why use the S-transform." AMS Pseudo-differential
+        operators: Partial differential equations and time-frequency
+        analysis 52 (2007): 279-309.
+    Moukadem, A., Bouguila, Z., Abdeslam, D. O., and Dieterlen, A. Stockwell
+        transform optimization applied on the detection of split in heart
+        sounds. Proceedings of the 22nd European Signal Processing Conference
+        (EUSIPCO 2014), pages 2015--2019.
+    Wheat, K., Cornelissen, P. L., Frost, S.J, and Peter C. Hansen (2010).
+        During Visual Word Recognition, Phonology Is Accessed
+        within 100 ms and May Be Mediated by a Speech Production
+        Code: Evidence from Magnetoencephalography. The Journal of
+        Neuroscience, 30 (15), 5229-5233.
+    K. A. Jones and B. Porjesz and D. Chorlian and M. Rangaswamy and C.
+        Kamarajan and A. Padmanabhapillai and A. Stimus and H. Begleiter
+        (2006). S-transform time-frequency analysis of P300 reveals deficits in
+        individuals diagnosed with alcoholism.
+        Clinical Neurophysiology 117 2128--2143
+    """
+    n_epochs, n_channels = data.shape[:2]
+    n_out = data.shape[2] // decim + bool(data.shape[2] % decim)
+    data, n_fft_, zero_pad = _check_input_st(data, n_fft)
+
+    freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
+    if fmin is None:
+        fmin = freqs[freqs > 0][0]
+    if fmax is None:
+        fmax = freqs.max()
+
+    start_f = np.abs(freqs - fmin).argmin()
+    stop_f = np.abs(freqs - fmax).argmin()
+    freqs = freqs[start_f:stop_f]
+
+    W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
+    n_freq = stop_f - start_f
+    psd = np.empty((n_channels, n_freq, n_out))
+    itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
+
+    parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
+    tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
+                          decim, W)
+                    for c in range(n_channels))
+    for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
+        psd[c] = this_psd
+        if this_itc is not None:
+            itc[c] = this_itc
+
+    return psd, itc, freqs
+
+
+@verbose
+def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
+                  width=1.0, decim=1, return_itc=False, n_jobs=1,
+                  verbose=None):
+    """Time-Frequency Representation (TFR) using Stockwell Transform
+
+    Parameters
+    ----------
+    inst : Epochs | Evoked
+        The epochs or evoked object.
+    fmin : None, float
+        The minimum frequency to include. If None defaults to the minimum fft
+        frequency greater than zero.
+    fmax : None, float
+        The maximum frequency to include. If None defaults to the maximum fft.
+    n_fft : int | None
+        The length of the windows used for FFT. If None, it defaults to the
+        next power of 2 larger than the signal length.
+    width : float
+        The width of the Gaussian window. Values < 1 increase temporal
+        resolution, values > 1 increase frequency resolution. Defaults to 1
+        (the classical S-transform).
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+    n_jobs : int
+        The number of jobs to run in parallel (over channels).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : AverageTFR
+        The averaged power.
+    itc : AverageTFR
+        The intertrial coherence. Only returned if return_itc is True.
+
+    See Also
+    --------
+    cwt : Compute time-frequency decomposition with user-provided wavelets
+    cwt_morlet, multitaper_psd
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    # verbose dec is used b/c subfunctions are verbose
+    data = _get_data(inst, return_itc)
+    picks = pick_types(inst.info, meg=True, eeg=True)
+    info = pick_info(inst.info, picks)
+    data = data[:, picks, :]
+    n_jobs = check_n_jobs(n_jobs)
+    power, itc, freqs = _induced_power_stockwell(data,
+                                                 sfreq=info['sfreq'],
+                                                 fmin=fmin, fmax=fmax,
+                                                 n_fft=n_fft,
+                                                 width=width,
+                                                 decim=decim,
+                                                 return_itc=return_itc,
+                                                 n_jobs=n_jobs)
+    times = inst.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
+    if return_itc:
+        out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
+                               freqs.copy(), nave, method='stockwell-itc'))
+    return out
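
A hedged usage sketch for the new tfr_stockwell entry point: `epochs` is
assumed to be an already-constructed mne.Epochs instance and the frequency
band is arbitrary.

from mne.time_frequency import tfr_stockwell


def stockwell_tfr(epochs):
    """Return averaged power and inter-trial coherence as AverageTFR."""
    power, itc = tfr_stockwell(epochs, fmin=4., fmax=40., width=1.0,
                               decim=2, return_itc=True)
    return power, itc
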
diff --git a/mne/time_frequency/ar.py b/mne/time_frequency/ar.py
index 2feba20..8be7039 100644
--- a/mne/time_frequency/ar.py
+++ b/mne/time_frequency/ar.py
@@ -6,10 +6,14 @@
 import numpy as np
 from scipy.linalg import toeplitz
 
+from ..io.pick import pick_types
+from ..utils import verbose
+
 
 # XXX : Back ported from statsmodels
 
-def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
+def yule_walker(X, order=1, method="unbiased", df=None, inv=False,
+                demean=True):
     """
     Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
 
@@ -31,8 +35,9 @@ def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
        denominator is n=X.shape[0], if "unbiased" the denominator is n-k.
        The default is unbiased.
     df : integer, optional
-       Specifies the degrees of freedom. If `df` is supplied, then it is assumed
-       the X has `df` degrees of freedom rather than `n`.  Default is None.
+       Specifies the degrees of freedom. If `df` is supplied, then it is
+       assumed that X has `df` degrees of freedom rather than `n`. Default
+       is None.
     inv : bool
         If inv is True the inverse of R is also returned.  Default is False.
     demean : bool
@@ -46,32 +51,34 @@ def yule_walker(X, order=1, method="unbiased", df=None, inv=False, demean=True):
         TODO
 
     """
-#TODO: define R better, look back at notes and technical notes on YW.
-#First link here is useful
-#http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
+    # TODO: define R better, look back at notes and technical notes on YW.
+    # First link here is useful
+    # http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm  # noqa
     method = str(method).lower()
     if method not in ["unbiased", "mle"]:
         raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
-    X = np.array(X)
+    X = np.array(X, float)
     if demean:
         X -= X.mean()                  # automatically demean's X
     n = df or X.shape[0]
 
     if method == "unbiased":        # this is df_resid ie., n - p
-        denom = lambda k: n - k
+        def denom(k):
+            return n - k
     else:
-        denom = lambda k: n
+        def denom(k):
+            return n
     if X.ndim > 1 and X.shape[1] != 1:
         raise ValueError("expecting a vector to estimate AR parameters")
-    r = np.zeros(order+1, np.float64)
-    r[0] = (X**2).sum() / denom(0)
-    for k in range(1,order+1):
-        r[k] = (X[0:-k]*X[k:]).sum() / denom(k)
+    r = np.zeros(order + 1, np.float64)
+    r[0] = (X ** 2).sum() / denom(0)
+    for k in range(1, order + 1):
+        r[k] = (X[0:-k] * X[k:]).sum() / denom(k)
     R = toeplitz(r[:-1])
 
     rho = np.linalg.solve(R, r[1:])
-    sigmasq = r[0] - (r[1:]*rho).sum()
-    if inv == True:
+    sigmasq = r[0] - (r[1:] * rho).sum()
+    if inv:
         return rho, np.sqrt(sigmasq), np.linalg.inv(R)
     else:
         return rho, np.sqrt(sigmasq)
@@ -115,38 +122,44 @@ def ar_raw(raw, order, picks, tmin=None, tmax=None):
     return coefs
 
 
-def iir_filter_raw(raw, order, picks, tmin=None, tmax=None):
+@verbose
+def fit_iir_model_raw(raw, order=2, picks=None, tmin=None, tmax=None,
+                      verbose=None):
     """Fits an AR model to raw data and creates the corresponding IIR filter
 
     The computed filter is the average filter for all the picked channels.
-    The returned filter coefficents are the denominator of the filter
-    (the numerator is 1). The frequency response is given by
+    The frequency response is given by:
+
+    .. math::
 
-        jw   1
-     H(e) = --------------------------------
-                        -jw             -jnw
-            a[0] + a[1]e    + ... + a[n]e
+        H(e^{jw}) = \\frac{1}{a[0] + a[1]e^{-jw} + ...
+                                  + a[n]e^{-jnw}}
 
     Parameters
     ----------
     raw : Raw object
-        an instance of Raw
+        an instance of Raw.
     order : int
-        order of the FIR filter
-    picks : array-like of int
-        indices of selected channels
+        order of the AR model (the corresponding IIR filter is all-pole).
+    picks : array-like of int | None
+        indices of selected channels. If None, MEG and EEG channels are used.
     tmin : float
         The beginning of time interval in seconds.
     tmax : float
         The end of time interval in seconds.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
-    a : array
-        filter coefficients
+    b : ndarray
+        Numerator filter coefficients.
+    a : ndarray
+        Denominator filter coefficients.
     """
-    picks = picks[:5]
+    if picks is None:
+        picks = pick_types(raw.info, meg=True, eeg=True)
     coefs = ar_raw(raw, order=order, picks=picks, tmin=tmin, tmax=tmax)
     mean_coefs = np.mean(coefs, axis=0)  # mean model across channels
-    a = np.r_[1, -mean_coefs]  # filter coefficients
-    return a
+    a = np.concatenate(([1.], -mean_coefs))  # filter coefficients
+    return np.array([1.]), a
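
The (b, a) pair returned by the new fit_iir_model_raw plugs directly into
scipy.signal. One common use is temporal whitening, sketched below; `raw` and
`picks` are assumed to exist, and this is an illustration rather than an
endorsed recipe.

from scipy.signal import lfilter


def whiten_data(raw, picks, order=5):
    """Fit an AR model and apply the inverse (whitening) filter."""
    from mne.time_frequency import fit_iir_model_raw
    b, a = fit_iir_model_raw(raw, order=order, picks=picks)
    data = raw[picks, :][0]
    # the model is all-pole (b == [1.]), so filtering with (a, b) swapped
    # applies the inverse filter, i.e. approximately whitens the signal
    return lfilter(a, b, data, axis=-1)
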
diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py
index bb40349..e147da2 100644
--- a/mne/time_frequency/csd.py
+++ b/mne/time_frequency/csd.py
@@ -30,8 +30,6 @@ class CrossSpectralDensity(object):
     frequencies : float | list of float
         Frequency or frequencies for which the CSD matrix was calculated. If a
         list is passed, data is a sum across CSD matrices for all frequencies.
-    sfreq : float
-        Sampling frequency of the data from which the CSD was obtained.
     n_fft : int
         Length of the FFT used when calculating the CSD matrix.
     """
@@ -146,9 +144,9 @@ def compute_epochs_csd(epochs, mode='multitaper', fmin=0, fmax=np.inf,
 
     # Preparing frequencies of interest
     sfreq = epochs.info['sfreq']
-    frequencies = fftfreq(n_fft, 1. / sfreq)
-    freq_mask = (frequencies > fmin) & (frequencies < fmax)
-    frequencies = frequencies[freq_mask]
+    orig_frequencies = fftfreq(n_fft, 1. / sfreq)
+    freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
+    frequencies = orig_frequencies[freq_mask]
     n_freqs = len(frequencies)
 
     if n_freqs == 0:
@@ -189,6 +187,9 @@ def compute_epochs_csd(epochs, mode='multitaper', fmin=0, fmax=np.inf,
     csds_mean = np.zeros((len(ch_names), len(ch_names), n_freqs),
                          dtype=complex)
 
+    # Picking frequencies of interest
+    freq_mask_mt = freq_mask[orig_frequencies >= 0]
+
     # Compute CSD for each epoch
     n_epochs = 0
     for epoch in epochs:
@@ -213,8 +214,7 @@ def compute_epochs_csd(epochs, mode='multitaper', fmin=0, fmax=np.inf,
                 # Hack so we can sum over axis=-2
                 weights = np.array([1.])[:, None, None, None]
 
-        # Picking frequencies of interest
-        x_mt = x_mt[:, :, freq_mask]
+        x_mt = x_mt[:, :, freq_mask_mt]
 
         # Calculating CSD
         # Tiling x_mt so that we can easily use _csd_from_mt()
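
The csd.py hunk above fixes an indexing subtlety: fftfreq() returns both
positive and negative frequencies, while the multitaper spectra (x_mt) only
carry the non-negative half, so the boolean mask must be restricted before
indexing. A minimal NumPy illustration:

import numpy as np
from scipy.fftpack import fftfreq

n_fft, sfreq, fmin, fmax = 8, 100., 10., 40.
orig_frequencies = fftfreq(n_fft, 1. / sfreq)    # includes negative freqs
freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
freq_mask_mt = freq_mask[orig_frequencies >= 0]  # aligned to the mt output
print(orig_frequencies)  # [  0.   12.5  25.   37.5 -50.  -37.5 -25.  -12.5]
print(freq_mask_mt)      # [False  True  True  True]
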
diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py
index 1985ee3..37061a9 100644
--- a/mne/time_frequency/multitaper.py
+++ b/mne/time_frequency/multitaper.py
@@ -5,7 +5,8 @@
 from warnings import warn
 
 import numpy as np
-from scipy import fftpack, linalg, interpolate
+from scipy import fftpack, linalg
+import warnings
 
 from ..parallel import parallel_func
 from ..utils import verbose, sum_squared
@@ -146,6 +147,7 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
     uncertainty V: The discrete case. Bell System Technical Journal,
     Volume 57 (1978), 1371-1430
     """
+    from scipy.interpolate import interp1d
     Kmax = int(Kmax)
     W = float(half_nbw) / N
     nidx = np.arange(N, dtype='d')
@@ -162,7 +164,7 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
         d, e = dpss_windows(interp_from, half_nbw, Kmax, low_bias=False)
         for this_d in d:
             x = np.arange(this_d.shape[-1])
-            I = interpolate.interp1d(x, this_d, kind=interp_kind)
+            I = interp1d(x, this_d, kind=interp_kind)
             d_temp = I(np.arange(0, this_d.shape[-1] - 1,
                                  float(this_d.shape[-1] - 1) / N))
 
@@ -239,8 +241,12 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
 
     if low_bias:
         idx = (eigvals > 0.9)
+        if not idx.any():
+            warnings.warn('Could not properly use low_bias, '
+                          'keeping lowest-bias taper')
+            idx = [np.argmax(eigvals)]
         dpss, eigvals = dpss[idx], eigvals[idx]
-
+    assert len(dpss) > 0  # should never happen
     return dpss, eigvals
 
 
@@ -373,10 +379,9 @@ def _psd_from_mt(x_mt, weights):
     psd : array
         The computed PSD
     """
-
-    psd = np.sum(np.abs(weights * x_mt) ** 2, axis=-2)
-    psd *= 2 / np.sum(np.abs(weights) ** 2, axis=-2)
-
+    psd = weights * x_mt
+    psd = (psd * psd.conj()).real.sum(axis=-2)
+    psd *= 2 / (weights * weights.conj()).real.sum(axis=-2)
     return psd
 
 
@@ -399,14 +404,10 @@ def _csd_from_mt(x_mt, y_mt, weights_x, weights_y):
     psd: array
         The computed PSD
     """
-
     csd = np.sum(weights_x * x_mt * (weights_y * y_mt).conj(), axis=-2)
-
-    denom = (np.sqrt(np.sum(np.abs(weights_x) ** 2, axis=-2))
-             * np.sqrt(np.sum(np.abs(weights_y) ** 2, axis=-2)))
-
+    denom = (np.sqrt((weights_x * weights_x.conj()).real.sum(axis=-2)) *
+             np.sqrt((weights_y * weights_y.conj()).real.sum(axis=-2)))
     csd *= 2 / denom
-
     return csd
 
 
@@ -489,6 +490,14 @@ def multitaper_psd(x, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
         The computed PSD.
     freqs : array
         The frequency points in Hz of the PSD.
+
+    See Also
+    --------
+    mne.io.Raw.plot_psd, mne.Epochs.plot_psd
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
     if normalization not in ('length', 'full'):
         raise ValueError('Normalization must be "length" or "full", not %s'
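
The _psd_from_mt rewrite above replaces np.abs(...) ** 2 with
(z * z.conj()).real, i.e. |z| ** 2 computed without the intermediate square
root. A quick NumPy check of the equivalence:

import numpy as np

rng = np.random.RandomState(0)
x_mt = rng.randn(3, 5, 16) + 1j * rng.randn(3, 5, 16)  # (chan, taper, freq)
weights = rng.randn(5, 16) + 1j * rng.randn(5, 16)

old = np.sum(np.abs(weights * x_mt) ** 2, axis=-2)
old *= 2 / np.sum(np.abs(weights) ** 2, axis=-2)

z = weights * x_mt
new = (z * z.conj()).real.sum(axis=-2)
new *= 2 / (weights * weights.conj()).real.sum(axis=-2)

np.testing.assert_allclose(old, new)  # identical up to rounding
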
diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py
index bd719d5..c728163 100644
--- a/mne/time_frequency/psd.py
+++ b/mne/time_frequency/psd.py
@@ -7,20 +7,24 @@ import numpy as np
 from ..parallel import parallel_func
 from ..io.proj import make_projector_info
 from ..io.pick import pick_types
-from ..utils import logger, verbose
+from ..utils import logger, verbose, _time_mask
 
 
 @verbose
-def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
-                    fmin=0, fmax=np.inf, n_fft=2048, pad_to=None, n_overlap=0,
-                    n_jobs=1, plot=False, proj=False, NFFT=None,
-                    verbose=None):
+def compute_raw_psd(raw, tmin=0., tmax=None, picks=None, fmin=0,
+                    fmax=np.inf, n_fft=2048, n_overlap=0,
+                    proj=False, n_jobs=1, verbose=None):
     """Compute power spectral density with average periodograms.
 
     Parameters
     ----------
     raw : instance of Raw
         The raw data.
+    tmin : float
+        Minimum time instant to consider (in seconds).
+    tmax : float | None
+        Maximum time instant to consider (in seconds). None will use the
+        end of the file.
     picks : array-like of int | None
         The selection of channels to include in the computation.
         If None, take all channels.
@@ -31,18 +35,13 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
     n_fft : int
         The length of the tapers, i.e. the windows. The smaller
         it is, the smoother the PSDs.
-    pad_to : int | None
-        The number of points to which the data segment is padded when
-        performing the FFT. If None, pad_to equals `NFFT`.
     n_overlap : int
         The number of points of overlap between blocks. The default value
         is 0 (no overlap).
+    proj : bool
+        Apply SSP projection vectors.
     n_jobs : int
         Number of CPUs to use in the computation.
-    plot : bool
-        Plot each PSD estimates
-    proj : bool
-        Apply SSP projection vectors
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -53,15 +52,14 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
     freqs: array of float
         The frequencies
     """
-    if NFFT is not None:
-        n_fft = NFFT
-        warnings.warn("`NFFT` is deprecated and will be removed in v0.9. "
-                      "Use `n_fft` instead")
+    from scipy.signal import welch
+    tmax = raw.times[-1] if tmax is None else tmax
     start, stop = raw.time_as_index([tmin, tmax])
     if picks is not None:
         data, times = raw[picks, start:(stop + 1)]
     else:
         data, times = raw[:, start:(stop + 1)]
+    n_fft, n_overlap = _check_nfft(len(times), n_fft, n_overlap)
 
     if proj:
         proj, _ = make_projector_info(raw.info)
@@ -75,21 +73,25 @@ def compute_raw_psd(raw, tmin=0, tmax=np.inf, picks=None,
 
     logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
 
-    import matplotlib.pyplot as plt
-    parallel, my_psd, n_jobs = parallel_func(plt.psd, n_jobs)
-    fig = plt.figure()
-    out = parallel(my_psd(d, Fs=Fs, NFFT=n_fft, noverlap=n_overlap,
-                          pad_to=pad_to) for d in data)
-    if not plot:
-        plt.close(fig)
-    freqs = out[0][1]
-    psd = np.array([o[0] for o in out])
+    parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
+                                                verbose=verbose)
 
-    mask = (freqs >= fmin) & (freqs <= fmax)
-    freqs = freqs[mask]
-    psd = psd[:, mask]
+    freqs = np.arange(n_fft // 2 + 1) * (Fs / n_fft)
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freq_mask]
 
-    return psd, freqs
+    psds = np.array(parallel(my_pwelch([channel],
+                                       noverlap=n_overlap, nfft=n_fft, fs=Fs,
+                                       freq_mask=freq_mask, welch_fun=welch)
+                             for channel in data))[:, 0, :]
+
+    return psds, freqs
+
+
+def _pwelch(epoch, noverlap, nfft, fs, freq_mask, welch_fun):
+    """Aux function"""
+    return welch_fun(epoch, nperseg=nfft, noverlap=noverlap,
+                     nfft=nfft, fs=fs)[1][..., freq_mask]
 
 
 def _compute_psd(data, fmin, fmax, Fs, n_fft, psd, n_overlap, pad_to):
@@ -103,19 +105,23 @@ def _compute_psd(data, fmin, fmax, Fs, n_fft, psd, n_overlap, pad_to):
     return psd[:, mask], freqs
 
 
+def _check_nfft(n, n_fft, n_overlap):
+    """Helper to make sure n_fft and n_overlap make sense"""
+    n_fft = n if n_fft > n else n_fft
+    n_overlap = n_fft - 1 if n_overlap >= n_fft else n_overlap
+    return n_fft, n_overlap
+
+
 @verbose
-def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
-                       pad_to=None, n_overlap=0, n_jobs=1, verbose=None):
-    """Compute power spectral density with with average periodograms.
+def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, tmin=None,
+                       tmax=None, n_fft=256, n_overlap=0, proj=False,
+                       n_jobs=1, verbose=None):
+    """Compute power spectral density with average periodograms.
 
     Parameters
     ----------
     epochs : instance of Epochs
         The epochs.
-    tmin : float
-        Min time instant to consider
-    tmax : float
-        Max time instant to consider
     picks : array-like of int | None
         The selection of channels to include in the computation.
         If None, take all channels.
@@ -123,15 +129,20 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
         Min frequency of interest
     fmax : float
         Max frequency of interest
+    tmin : float | None
+        Min time of interest
+    tmax : float | None
+        Max time of interest
     n_fft : int
         The length of the tapers, i.e. the windows. The smaller
-        it is the smoother are the PSDs.
-    pad_to : int | None
-        The number of points to which the data segment is padded when
-        performing the FFT. If None, pad_to equals `n_fft`.
+        it is, the smoother the PSDs. The default value is 256.
+        If ``n_fft > len(epochs.times)``, it will be adjusted down to
+        ``len(epochs.times)``.
     n_overlap : int
-        The number of points of overlap between blocks. The default value
-        is 0 (no overlap).
+        The number of points of overlap between blocks. Will be adjusted
+        to be <= n_fft.
+    proj : bool
+        Apply SSP projection vectors.
     n_jobs : int
         Number of CPUs to use in the computation.
     verbose : bool, str, int, or None
@@ -144,22 +155,45 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256,
     freqs : ndarray (n_freqs)
         The frequencies.
     """
-
+    from scipy.signal import welch
     n_fft = int(n_fft)
     Fs = epochs.info['sfreq']
     if picks is None:
         picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                            exclude='bads')
+    n_fft, n_overlap = _check_nfft(len(epochs.times), n_fft, n_overlap)
+
+    if tmin is not None or tmax is not None:
+        time_mask = _time_mask(epochs.times, tmin, tmax)
+    else:
+        time_mask = Ellipsis
+
+    data = epochs.get_data()[:, picks][..., time_mask]
+    if proj:
+        proj, _ = make_projector_info(epochs.info)
+        if picks is not None:
+            data = np.dot(proj[picks][:, picks], data)
+        else:
+            data = np.dot(proj, data)
 
     logger.info("Effective window size : %0.3f (s)" % (n_fft / float(Fs)))
-    psds = []
-    import matplotlib.pyplot as plt
-    parallel, my_psd, n_jobs = parallel_func(_compute_psd, n_jobs)
-    fig = plt.figure()  # threading will induce errors otherwise
-    out = parallel(my_psd(data[picks], fmin, fmax, Fs, n_fft, plt.psd,
-                          n_overlap, pad_to)
-                   for data in epochs)
-    plt.close(fig)
-    psds = [o[0] for o in out]
-    freqs = [o[1] for o in out]
-    return np.array(psds), freqs[0]
+
+    freqs = np.arange(n_fft // 2 + 1, dtype=float) * (Fs / n_fft)
+    freq_mask = (freqs >= fmin) & (freqs <= fmax)
+    freqs = freqs[freq_mask]
+    psds = np.empty(data.shape[:-1] + (freqs.size,))
+
+    parallel, my_pwelch, n_jobs = parallel_func(_pwelch, n_jobs=n_jobs,
+                                                verbose=verbose)
+
+    for idx, fepochs in zip(np.array_split(np.arange(len(data)), n_jobs),
+                            parallel(my_pwelch(epoch, noverlap=n_overlap,
+                                               nfft=n_fft, fs=Fs,
+                                               freq_mask=freq_mask,
+                                               welch_fun=welch)
+                                     for epoch in np.array_split(data,
+                                                                 n_jobs))):
+        for i_epoch, f_epoch in zip(idx, fepochs):
+            psds[i_epoch, :, :] = f_epoch
+
+    return psds, freqs
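
For orientation, a minimal usage sketch of the Welch-based estimator
above (the file name and the time/frequency limits are illustrative,
not taken from the patch):

    from mne import io
    from mne.time_frequency import compute_raw_psd

    raw = io.Raw('test_raw.fif')  # hypothetical file name
    # n_fft and n_overlap are sanity-checked by _check_nfft: n_fft is
    # clipped to the number of samples, n_overlap to n_fft - 1
    psds, freqs = compute_raw_psd(raw, tmin=0, tmax=10, fmin=2, fmax=70,
                                  n_fft=2048, n_overlap=0, proj=False,
                                  n_jobs=1)
    # psds has shape (n_channels, n_freqs)
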
diff --git a/mne/time_frequency/stft.py b/mne/time_frequency/stft.py
index 086a8b5..83e2733 100644
--- a/mne/time_frequency/stft.py
+++ b/mne/time_frequency/stft.py
@@ -30,12 +30,12 @@ def stft(x, wsize, tstep=None, verbose=None):
         STFT coefficients for positive frequencies with
         n_step = ceil(T / tstep)
 
-    Usage
-    -----
+    Examples
+    --------
     X = stft(x, wsize)
     X = stft(x, wsize, tstep)
 
-    See also
+    See Also
     --------
     istft
     stftfreq
@@ -49,7 +49,7 @@ def stft(x, wsize, tstep=None, verbose=None):
     n_signals, T = x.shape
     wsize = int(wsize)
 
-    ### Errors and warnings ###
+    # Errors and warnings
     if wsize % 4:
         raise ValueError('The window length must be a multiple of 4.')
 
@@ -120,16 +120,16 @@ def istft(X, tstep=None, Tx=None):
     x : 1d array of length Tx
         vector containing the inverse STFT signal
 
-    Usage
-    -----
+    Examples
+    --------
     x = istft(X)
     x = istft(X, tstep)
 
-    See also
+    See Also
     --------
     stft
     """
-    ### Errors and warnings ###
+    # Errors and warnings
     n_signals, n_win, n_step = X.shape
     if (n_win % 2 == 0):
         raise ValueError('The number of rows of the STFT matrix must be odd.')
@@ -159,10 +159,10 @@ def istft(X, tstep=None, Tx=None):
     if n_signals == 0:
         return x[:, :Tx]
 
-    ### Computing inverse STFT signal ###
     # Defining sine window
     win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
     # win = win / norm(win);
+
     # Pre-processing for edges
     swin = np.zeros(T + wsize - tstep, dtype=np.float)
     for t in range(n_step):
@@ -201,7 +201,7 @@ def stftfreq(wsize, sfreq=None):
         The positive frequencies returned by stft
 
 
-    See also
+    See Also
     --------
     stft
     istft
@@ -228,9 +228,10 @@ def stft_norm2(X):
     Returns
     -------
     norms2 : array
-        The squared L2 norm of every raw of X.
+        The squared L2 norm of every row of X.
     """
-    X2 = np.abs(X) ** 2
-    # compute all L2 coefs and remove freq zero once.
-    norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1))
+    X2 = (X * X.conj()).real
+    # compute all L2 coefs and remove first and last frequency once.
+    norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1) -
+              np.sum(X2[:, -1, :], axis=1))
     return norms2
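
The updated stft_norm2 subtracts the first and last frequency rows
once, and the tight-frame property still gives exact reconstruction.
A round-trip sketch (signal shape and window parameters are arbitrary;
stft_norm2 is imported from the module patched above):

    import numpy as np
    from scipy import linalg
    from mne.time_frequency import stft, istft
    from mne.time_frequency.stft import stft_norm2

    x = np.random.randn(2, 256)     # two signals, 256 samples each
    X = stft(x, wsize=16, tstep=8)  # wsize must be a multiple of 4
    xp = istft(X, tstep=8, Tx=256)  # near-perfect reconstruction
    assert np.allclose(x, xp, atol=1e-6)
    # norm conservation: sqrt(stft_norm2(X)) matches the row norms of x
    assert np.allclose(np.sqrt(stft_norm2(X)),
                       [linalg.norm(xx) for xx in x])
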
diff --git a/mne/time_frequency/tests/test_ar.py b/mne/time_frequency/tests/test_ar.py
index 70ea665..01d49f4 100644
--- a/mne/time_frequency/tests/test_ar.py
+++ b/mne/time_frequency/tests/test_ar.py
@@ -1,16 +1,17 @@
 import os.path as op
 import numpy as np
 from numpy.testing import assert_array_almost_equal
-from nose.tools import assert_true
+from nose.tools import assert_true, assert_equal
 
 from mne import io, pick_types
-from mne.time_frequency import yule_walker, ar_raw
+from mne.time_frequency.ar import yule_walker, fit_iir_model_raw
 from mne.utils import requires_statsmodels, requires_patsy
 
 
 raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
 
+
 @requires_patsy
 @requires_statsmodels
 def test_yule_walker():
@@ -28,16 +29,10 @@ def test_ar_raw():
     """Test fitting AR model on raw data
     """
     raw = io.Raw(raw_fname)
-
-    # picks MEG gradiometers
+    # pick MEG gradiometers
     picks = pick_types(raw.info, meg='grad', exclude='bads')
-
     picks = picks[:2]
-
-    tmin, tmax = 0, 10  # use the first s of data
-    order = 2
-    coefs = ar_raw(raw, picks=picks, order=order, tmin=tmin, tmax=tmax)
-    mean_coefs = np.mean(coefs, axis=0)
-
-    assert_true(coefs.shape == (len(picks), order))
-    assert_true(0.9 < mean_coefs[0] < 1.1)
+    tmin, tmax, order = 0, 10, 2
+    coefs = fit_iir_model_raw(raw, order, picks, tmin, tmax)[1][1:]
+    assert_equal(coefs.shape, (order,))
+    assert_true(0.9 < -coefs[0] < 1.1)
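
fit_iir_model_raw replaces the removed ar_raw. Judging from the updated
test, its second return value is the IIR denominator polynomial, with
the AR coefficients following the leading 1.0. A sketch along those
lines (the file path is hypothetical):

    from mne import io, pick_types
    from mne.time_frequency.ar import fit_iir_model_raw

    raw = io.Raw('test_raw.fif')  # hypothetical path
    picks = pick_types(raw.info, meg='grad', exclude='bads')[:2]
    # [1] selects the denominator; [1:] drops its leading 1.0
    coefs = fit_iir_model_raw(raw, order=2, picks=picks,
                              tmin=0, tmax=10)[1][1:]
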
diff --git a/mne/time_frequency/tests/test_csd.py b/mne/time_frequency/tests/test_csd.py
index 68cb9a0..753b191 100644
--- a/mne/time_frequency/tests/test_csd.py
+++ b/mne/time_frequency/tests/test_csd.py
@@ -1,6 +1,5 @@
 import numpy as np
-from nose.tools import (assert_raises, assert_equal, assert_almost_equal,
-                        assert_true)
+from nose.tools import assert_raises, assert_equal, assert_true
 from numpy.testing import assert_array_equal
 from os import path as op
 import warnings
@@ -9,7 +8,7 @@ import mne
 
 from mne.io import Raw
 from mne.utils import sum_squared
-from mne.time_frequency import compute_epochs_csd, induced_power
+from mne.time_frequency import compute_epochs_csd, tfr_morlet
 
 warnings.simplefilter('always')
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
@@ -24,7 +23,7 @@ def _get_data():
 
     # Set picks
     picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
-                                stim=False, exclude='bads')
+                           stim=False, exclude='bads')
 
     # Read several epochs
     event_id, tmin, tmax = 1, -0.2, 0.5
@@ -39,8 +38,8 @@ def _get_data():
                             picks=[0], baseline=(None, 0), preload=True,
                             reject=dict(grad=4000e-13))
     freq = 10
-    epochs_sin._data = np.sin(2 * np.pi * freq
-                              * epochs_sin.times)[None, None, :]
+    epochs_sin._data = np.sin(2 * np.pi * freq *
+                              epochs_sin.times)[None, None, :]
     return epochs, epochs_sin
 
 
@@ -74,11 +73,8 @@ def test_compute_epochs_csd():
 
     # Computing induced power for comparison
     epochs.crop(tmin=0.04, tmax=0.15)
-    with warnings.catch_warnings(record=True):  # deprecation
-        warnings.simplefilter('always')
-        power, _ = induced_power(epochs.get_data(), epochs.info['sfreq'], [10],
-                                 n_cycles=0.6)
-    power = np.mean(power, 2)
+    tfr = tfr_morlet(epochs, freqs=[10], n_cycles=0.6, return_itc=False)
+    power = np.mean(tfr.data, 2)
 
     # Maximum PSD should occur for specific channel
     max_ch_power = power.argmax()
@@ -163,5 +159,5 @@ def test_compute_epochs_csd_on_artificial_data():
                     delta = 0.05
                 else:
                     delta = 0.004
-                assert_true(abs(signal_power_per_sample - mt_power_per_sample)
-                            < delta)
+                assert_true(abs(signal_power_per_sample -
+                                mt_power_per_sample) < delta)
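
The deprecated induced_power call gives way to tfr_morlet, which
operates on Epochs directly and returns an AverageTFR. A migration
sketch, assuming epochs built as in the test above:

    import numpy as np
    from mne.time_frequency import tfr_morlet

    tfr = tfr_morlet(epochs, freqs=[10], n_cycles=0.6, return_itc=False)
    power = np.mean(tfr.data, 2)  # average over time, as in the test
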
diff --git a/mne/time_frequency/tests/test_multitaper.py b/mne/time_frequency/tests/test_multitaper.py
index 1dd04f2..2c4bdbe 100644
--- a/mne/time_frequency/tests/test_multitaper.py
+++ b/mne/time_frequency/tests/test_multitaper.py
@@ -46,8 +46,8 @@ def test_multitaper_psd():
     for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):
         psd, freqs = multitaper_psd(x, sfreq, adaptive=adaptive, n_jobs=n_jobs,
                                     normalization=norm)
-        freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(x, sfreq,
-            adaptive=adaptive, jackknife=False)
+        freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(
+            x, sfreq, adaptive=adaptive, jackknife=False)
 
         # for some reason nitime returns n_times + 1 frequency points
         # causing the value at 0 to be different
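
For reference, a minimal sketch of the multitaper_psd call being
reflowed above, on a plain array (values illustrative):

    import numpy as np
    from mne.time_frequency import multitaper_psd

    sfreq = 500.
    x = np.random.randn(int(sfreq))  # 1 s of noise
    psd, freqs = multitaper_psd(x, sfreq, adaptive=False, n_jobs=1)
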
diff --git a/mne/time_frequency/tests/test_psd.py b/mne/time_frequency/tests/test_psd.py
index 37a06b0..ab90940 100644
--- a/mne/time_frequency/tests/test_psd.py
+++ b/mne/time_frequency/tests/test_psd.py
@@ -3,9 +3,8 @@ import os.path as op
 from numpy.testing import assert_array_almost_equal
 from nose.tools import assert_true
 
-from mne import io, pick_types
-from mne import Epochs
-from mne import read_events
+from mne import io, pick_types, Epochs, read_events
+from mne.utils import requires_version, slow_test
 from mne.time_frequency import compute_raw_psd, compute_epochs_psd
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
@@ -13,6 +12,7 @@ raw_fname = op.join(base_dir, 'test_raw.fif')
 event_fname = op.join(base_dir, 'test-eve.fif')
 
 
+@requires_version('scipy', '0.12')
 def test_psd():
     """Test PSD estimation
     """
@@ -22,13 +22,22 @@ def test_psd():
 
     # picks MEG gradiometers
     picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
-                            exclude=exclude)
+                       exclude=exclude)
 
     picks = picks[:2]
 
     tmin, tmax = 0, 10  # use the first 10 s of data
     fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
-    n_fft = 128  # the FFT size (n_fft). Ideally a power of 2
+
+    n_fft = 128
+    psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, fmin=fmin,
+                                  fmax=fmax, proj=False, n_fft=n_fft,
+                                  picks=picks, n_jobs=1)
+    assert_true(psds.shape == (len(picks), len(freqs)))
+    assert_true(np.sum(freqs < 0) == 0)
+    assert_true(np.sum(psds < 0) == 0)
+
+    n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
     psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
                                   fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1,
                                   proj=False)
@@ -42,6 +51,7 @@ def test_psd():
     assert_true(np.sum(psds < 0) == 0)
 
 
+@requires_version('scipy', '0.12')
 def test_psd_epochs():
     """Test PSD estimation on epochs
     """
@@ -51,30 +61,45 @@ def test_psd_epochs():
 
     # picks MEG gradiometers
     picks = pick_types(raw.info, meg='mag', eeg=False, stim=False,
-                            exclude=exclude)
+                       exclude=exclude)
 
     picks = picks[:2]
 
-    n_fft = 128  # the FFT size (n_fft). Ideally a power of 2
+    n_fft = 512  # the FFT size (n_fft). Ideally a power of 2
 
-    tmin, tmax, event_id = -1, 1, 1
+    tmin, tmax, event_id = -0.5, 0.5, 1
     include = []
     raw.info['bads'] += ['MEG 2443']  # bads
 
     # picks MEG gradiometers
     picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
-                            stim=False, include=include, exclude='bads')
+                       stim=False, include=include, exclude='bads')
 
     events = read_events(event_fname)
+
     epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0),
                     reject=dict(grad=4000e-13, eog=150e-6), proj=False,
                     preload=True)
 
+    tmin_full, tmax_full = -1, 1
+    epochs_full = Epochs(raw, events[:10], event_id, tmax=tmax_full,
+                         tmin=tmin_full, picks=picks,
+                         baseline=(None, 0),
+                         reject=dict(grad=4000e-13, eog=150e-6), proj=False,
+                         preload=True)
+
     picks = pick_types(epochs.info, meg='grad', eeg=False, eog=True,
-                            stim=False, include=include, exclude='bads')
-    psds, freqs = compute_epochs_psd(epochs[:1], fmin=2, fmax=300, n_fft=n_fft,
-                                     picks=picks)
+                       stim=False, include=include, exclude='bads')
+    psds, freqs = compute_epochs_psd(epochs[:1], fmin=2, fmax=300,
+                                     n_fft=n_fft, picks=picks)
+
+    psds_t, freqs_t = compute_epochs_psd(epochs_full[:1], fmin=2, fmax=300,
+                                         tmin=tmin, tmax=tmax,
+                                         n_fft=n_fft, picks=picks)
+    # this one will fail if you add for example 0.1 to tmin
+    assert_array_almost_equal(psds, psds_t, 27)
+
     psds_proj, _ = compute_epochs_psd(epochs[:1].apply_proj(), fmin=2,
                                       fmax=300, n_fft=n_fft, picks=picks)
 
@@ -82,3 +107,51 @@ def test_psd_epochs():
     assert_true(psds.shape == (1, len(picks), len(freqs)))
     assert_true(np.sum(freqs < 0) == 0)
     assert_true(np.sum(psds < 0) == 0)
+
+
+@slow_test
+@requires_version('scipy', '0.12')
+def test_compares_psd():
+    """Test PSD estimation on raw for plt.psd and scipy.signal.welch
+    """
+    raw = io.Raw(raw_fname)
+
+    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
+
+    # picks MEG gradiometers
+    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
+                       exclude=exclude)[:2]
+
+    tmin, tmax = 0, 10  # use the first 10 s of data
+    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
+    n_fft = 2048
+
+    # Compute psds with the new implementation using Welch
+    psds_welch, freqs_welch = compute_raw_psd(raw, tmin=tmin, tmax=tmax,
+                                              fmin=fmin, fmax=fmax,
+                                              proj=False, picks=picks,
+                                              n_fft=n_fft, n_jobs=1)
+
+    # Compute psds with plt.psd
+    start, stop = raw.time_as_index([tmin, tmax])
+    data, times = raw[picks, start:(stop + 1)]
+    from matplotlib.pyplot import psd
+    out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
+    freqs_mpl = out[0][1]
+    psds_mpl = np.array([o[0] for o in out])
+
+    mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
+    freqs_mpl = freqs_mpl[mask]
+    psds_mpl = psds_mpl[:, mask]
+
+    assert_array_almost_equal(psds_welch, psds_mpl)
+    assert_array_almost_equal(freqs_welch, freqs_mpl)
+
+    assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
+    assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
+
+    assert_true(np.sum(freqs_welch < 0) == 0)
+    assert_true(np.sum(freqs_mpl < 0) == 0)
+
+    assert_true(np.sum(psds_welch < 0) == 0)
+    assert_true(np.sum(psds_mpl < 0) == 0)
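
compute_epochs_psd now also accepts tmin/tmax, so the PSD of a
sub-window of long epochs matches the PSD of epochs cut to that window,
as the test above checks. A sketch, assuming epochs_full spanning -1 to
1 s as in the test:

    psds_t, freqs_t = compute_epochs_psd(epochs_full, tmin=-0.5,
                                         tmax=0.5, fmin=2, fmax=300,
                                         n_fft=512)
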
diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py
index e7eca49..bf91f39 100644
--- a/mne/time_frequency/tests/test_stft.py
+++ b/mne/time_frequency/tests/test_stft.py
@@ -11,6 +11,7 @@ def test_stft():
     sfreq = 1000.  # Hz
     f = 7.  # Hz
     for T in [253, 256]:  # try with even and odd numbers
+        # Test with low frequency signal
         t = np.arange(T).astype(np.float)
         x = np.sin(2 * np.pi * f * t / sfreq)
         x = np.array([x, x + 1.])
@@ -26,12 +27,31 @@ def test_stft():
         assert_true(X.shape[1] == len(freqs))
         assert_true(np.all(freqs >= 0.))
         assert_true(np.abs(max_freq - f) < 1.)
+        assert_array_almost_equal(x, xp, decimal=6)
+
+        # norm conservation thanks to tight frame property
+        assert_almost_equal(np.sqrt(stft_norm2(X)),
+                            [linalg.norm(xx) for xx in x], decimal=6)
+
+        # Test with random signal
+        x = np.random.randn(2, T)
+        wsize = 16
+        tstep = 8
+        X = stft(x, wsize, tstep)
+        xp = istft(X, tstep, Tx=T)
 
+        freqs = stftfreq(wsize, sfreq=1000)
+
+        max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
+
+        assert_true(X.shape[1] == len(freqs))
+        assert_true(np.all(freqs >= 0.))
         assert_array_almost_equal(x, xp, decimal=6)
 
         # norm conservation thanks to tight frame property
         assert_almost_equal(np.sqrt(stft_norm2(X)),
-                            [linalg.norm(xx) for xx in x], decimal=2)
+                            [linalg.norm(xx) for xx in x],
+                            decimal=6)
 
         # Try with empty array
         x = np.zeros((0, T))
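
stftfreq maps STFT rows to frequencies in Hz, which is how the test
locates the dominant component. Continuing the earlier round-trip
sketch with the same X:

    import numpy as np
    from mne.time_frequency import stftfreq

    freqs = stftfreq(wsize=16, sfreq=1000.)
    # the row with the most energy across time gives the peak frequency
    max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
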
diff --git a/mne/time_frequency/tests/test_stockwell.py b/mne/time_frequency/tests/test_stockwell.py
new file mode 100644
index 0000000..1d57963
--- /dev/null
+++ b/mne/time_frequency/tests/test_stockwell.py
@@ -0,0 +1,96 @@
+# Authors : Denis A. Engemann <denis.engemann at gmail.com>
+#           Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#
+# License : BSD 3-clause
+
+import numpy as np
+import os.path as op
+from numpy.testing import assert_array_almost_equal, assert_allclose
+from nose.tools import assert_true, assert_equal
+
+from scipy import fftpack
+
+from mne import io, read_events, Epochs, pick_types
+from mne.time_frequency._stockwell import (tfr_stockwell, _st,
+                                           _precompute_st_windows)
+from mne.time_frequency.tfr import AverageTFR
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(base_dir, 'test_raw.fif')
+
+event_id, tmin, tmax = 1, -0.2, 0.5
+event_id_2 = 2
+raw = io.Raw(raw_fname, add_eeg_ref=False)
+event_name = op.join(base_dir, 'test-eve.fif')
+events = read_events(event_name)
+picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
+                   ecg=True, eog=True, include=['STI 014'],
+                   exclude='bads')
+
+reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
+flat = dict(grad=1e-15, mag=1e-15)
+
+
+def test_stockwell_core():
+    """Test stockwell transform"""
+    # adapted from
+    # http://vcs.ynic.york.ac.uk/docs/naf/intro/concepts/timefreq.html
+    sfreq = 1000.0  # make things easy to understand
+    dur = 0.5
+    onset, offset = 0.175, 0.275
+    n_samp = int(sfreq * dur)
+    t = np.arange(n_samp) / sfreq   # make an array for time
+    pulse_freq = 15.
+    pulse = np.cos(2. * np.pi * pulse_freq * t)
+    pulse[0:int(onset * sfreq)] = 0.        # Zero before our desired pulse
+    pulse[int(offset * sfreq):] = 0.         # and zero after our desired pulse
+
+    width = 0.5
+    freqs = fftpack.fftfreq(len(pulse), 1. / sfreq)
+    fmin, fmax = 1.0, 100.0
+    start_f, stop_f = [np.abs(freqs - f).argmin() for f in (fmin, fmax)]
+    W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
+
+    st_pulse = _st(pulse, start_f, W)
+    st_pulse = np.abs(st_pulse) ** 2
+    assert_equal(st_pulse.shape[-1], len(pulse))
+    st_max_freq = freqs[st_pulse.max(axis=1).argmax(axis=0)]  # max freq
+    assert_allclose(st_max_freq, pulse_freq, atol=1.0)
+    assert_true(onset < t[st_pulse.max(axis=0).argmax(axis=0)] < offset)
+
+    # test inversion to FFT, by averaging local spectra, see eq. 5 in
+    # Moukadem, A., Bouguila, Z., Ould Abdeslam, D. and Alain Dieterlen.
+    # "Stockwell transform optimization applied on the detection of split in
+    # heart sounds."
+
+    width = 1.0
+    start_f, stop_f = 0, len(pulse)
+    W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
+    y = _st(pulse, start_f, W)
+    # invert stockwell
+    y_inv = fftpack.ifft(np.sum(y, axis=1)).real
+    assert_array_almost_equal(pulse, y_inv)
+
+
+def test_stockwell_api():
+    """Test stockwell functions"""
+    epochs = Epochs(raw, events,  # XXX pick 2 has epochs of zeros.
+                    event_id, tmin, tmax, picks=[0, 1, 3], baseline=(None, 0))
+    for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
+        power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
+                                   return_itc=True)
+        if fmax is not None:
+            assert_true(power.freqs.max() <= fmax)
+        power_evoked = tfr_stockwell(epochs.average(), fmin=fmin, fmax=fmax,
+                                     return_itc=False)
+        # for multitaper these don't necessarily match, but they seem to
+        # for stockwell... if this fails, this maybe could be changed
+        # just to check the shape
+        assert_array_almost_equal(power_evoked.data, power.data)
+    assert_true(isinstance(power, AverageTFR))
+    assert_true(isinstance(itc, AverageTFR))
+    assert_equal(power.data.shape, itc.data.shape)
+    assert_true(itc.data.min() >= 0.0)
+    assert_true(itc.data.max() <= 1.0)
+    assert_true(np.log(power.data.max()) * 20 <= 0.0)
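
tfr_stockwell mirrors the tfr_morlet API on Epochs. A minimal sketch
(fmin/fmax values illustrative, epochs as constructed in the test):

    from mne.time_frequency._stockwell import tfr_stockwell

    power, itc = tfr_stockwell(epochs, fmin=5, fmax=50, return_itc=True)
    power_only = tfr_stockwell(epochs.average(), fmin=5, fmax=50,
                               return_itc=False)
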
diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py
index d9497cc..ee7a734 100644
--- a/mne/time_frequency/tests/test_tfr.py
+++ b/mne/time_frequency/tests/test_tfr.py
@@ -1,12 +1,18 @@
 import numpy as np
 import os.path as op
-from numpy.testing import assert_array_almost_equal
-from nose.tools import assert_true, assert_false, assert_equal
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from nose.tools import assert_true, assert_false, assert_equal, assert_raises
 
 import mne
-from mne import io, Epochs, read_events, pick_types
+from mne import io, Epochs, read_events, pick_types, create_info, EpochsArray
+from mne.utils import _TempDir, run_tests_if_main, slow_test, requires_h5py
 from mne.time_frequency import single_trial_power
 from mne.time_frequency.tfr import cwt_morlet, morlet, tfr_morlet
+from mne.time_frequency.tfr import _dpss_wavelet, tfr_multitaper
+from mne.time_frequency.tfr import AverageTFR, read_tfrs, write_tfrs
+
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
 
 raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                     'test_raw.fif')
@@ -40,7 +46,7 @@ def test_time_frequency():
 
     # picks MEG gradiometers
     picks = pick_types(raw.info, meg='grad', eeg=False,
-                            stim=False, include=include, exclude=exclude)
+                       stim=False, include=include, exclude=exclude)
 
     picks = picks[:2]
     epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
@@ -49,22 +55,35 @@ def test_time_frequency():
     times = epochs.times
     nave = len(data)
 
+    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax,
+                            baseline=(None, 0))
+
     freqs = np.arange(6, 20, 5)  # define frequencies of interest
     n_cycles = freqs / 4.
 
     # Test first with a single epoch
     power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                             use_fft=True, return_itc=True)
-
+    # Now compute evoked
+    evoked = epochs.average()
+    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
+                              return_itc=False)
+    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
     power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                             use_fft=True, return_itc=True)
+    # Test picks argument
+    power_picks, itc_picks = tfr_morlet(epochs_nopicks, freqs=freqs,
+                                        n_cycles=n_cycles, use_fft=True,
+                                        return_itc=True, picks=picks)
+    # the actual data arrays here are equivalent, too...
+    assert_array_almost_equal(power.data, power_picks.data)
+    assert_array_almost_equal(itc.data, itc_picks.data)
+    assert_array_almost_equal(power.data, power_evoked.data)
 
     print(itc)  # test repr
-    print(itc.ch_names) # test property
-    itc = itc + power # test add
-    itc = itc - power # test add
-    itc -= power
-    itc += power
+    print(itc.ch_names)  # test property
+    itc += power  # test add
+    itc -= power  # test sub
 
     power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
 
@@ -107,3 +126,199 @@ def test_time_frequency():
     mne.equalize_channels([power_pick, power_drop])
     assert_equal(power_pick.ch_names, power_drop.ch_names)
     assert_equal(power_pick.data.shape, power_drop.data.shape)
+
+
+def test_dpsswavelet():
+    """Test DPSS wavelet"""
+    freqs = np.arange(5, 25, 3)
+    Ws = _dpss_wavelet(1000, freqs=freqs, n_cycles=freqs / 2.,
+                       time_bandwidth=4.0, zero_mean=True)
+
+    assert_true(len(Ws) == 3)  # 3 tapers expected
+
+    # Check that zero mean is true
+    assert_true(np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
+
+    assert_true(len(Ws[0]) == len(freqs))  # As many wavelets as asked for
+
+
+@slow_test
+def test_tfr_multitaper():
+    """Test tfr_multitaper"""
+    sfreq = 200.0
+    ch_names = ['SIM0001', 'SIM0002', 'SIM0003']
+    ch_types = ['grad', 'grad', 'grad']
+    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+
+    n_times = int(sfreq)  # one-second-long epochs
+    n_epochs = 3
+    seed = 42
+    rng = np.random.RandomState(seed)
+    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
+    t = np.arange(n_times, dtype=np.float) / sfreq
+    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
+    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
+    on_time = np.logical_and(t >= 0.45, t <= 0.55)
+    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
+    dat = noise + signal
+
+    reject = dict(grad=4000.)
+    events = np.empty((n_epochs, 3), int)
+    first_event_sample = 100
+    event_id = dict(sin50hz=1)
+    for k in range(n_epochs):
+        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
+
+    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
+                         reject=reject)
+
+    freqs = np.arange(5, 100, 3, dtype=np.float)
+    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
+                                time_bandwidth=4.0)
+    picks = np.arange(len(ch_names))
+    power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
+                                            n_cycles=freqs / 2.,
+                                            time_bandwidth=4.0, picks=picks)
+    power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
+                                  n_cycles=freqs / 2., time_bandwidth=4.0,
+                                  return_itc=False)
+    # test picks argument
+    assert_array_almost_equal(power.data, power_picks.data)
+    assert_array_almost_equal(itc.data, itc_picks.data)
+    # one is squared magnitude of the average (evoked) and
+    # the other is average of the squared magnitudes (epochs PSD)
+    # so values shouldn't match, but shapes should
+    assert_array_equal(power.data.shape, power_evoked.data.shape)
+    assert_raises(AssertionError, assert_array_almost_equal,
+                  power.data, power_evoked.data)
+
+    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
+    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
+    assert_true(tmax > 0.3 and tmax < 0.7)
+    assert_false(np.any(itc.data < 0.))
+    assert_true(fmax > 40 and fmax < 60)
+
+
+def test_crop():
+    """Test TFR cropping"""
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.crop(0.2, 0.3)
+    assert_array_equal(tfr.times, [0.2, 0.3])
+    assert_equal(tfr.data.shape[-1], 2)
+
+
+@requires_h5py
+def test_io():
+    """Test TFR IO capacities"""
+
+    tempdir = _TempDir()
+    fname = op.join(tempdir, 'test-tfr.h5')
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.save(fname)
+    tfr2 = read_tfrs(fname, condition='test')
+
+    assert_array_equal(tfr.data, tfr2.data)
+    assert_array_equal(tfr.times, tfr2.times)
+    assert_array_equal(tfr.freqs, tfr2.freqs)
+    assert_equal(tfr.comment, tfr2.comment)
+    assert_equal(tfr.nave, tfr2.nave)
+
+    assert_raises(IOError, tfr.save, fname)
+
+    tfr.comment = None
+    tfr.save(fname, overwrite=True)
+    assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
+    tfr.comment = 'test-A'
+    tfr2.comment = 'test-B'
+
+    fname = op.join(tempdir, 'test2-tfr.h5')
+    write_tfrs(fname, [tfr, tfr2])
+    tfr3 = read_tfrs(fname, condition='test-A')
+    assert_equal(tfr.comment, tfr3.comment)
+
+    assert_true(isinstance(tfr.info, io.meas_info.Info))
+
+    tfrs = read_tfrs(fname, condition=None)
+    assert_equal(len(tfrs), 2)
+    tfr4 = tfrs[1]
+    assert_equal(tfr2.comment, tfr4.comment)
+
+    assert_raises(ValueError, read_tfrs, fname, condition='nonono')
+
+
+def test_plot():
+    """Test TFR plotting."""
+    import matplotlib.pyplot as plt
+
+    data = np.zeros((3, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
+                           ['mag', 'mag', 'mag'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr.plot([1, 2], title='title')
+    plt.close('all')
+    ax = plt.subplot2grid((2, 2), (0, 0))
+    ax2 = plt.subplot2grid((2, 2), (1, 1))
+    ax3 = plt.subplot2grid((2, 2), (0, 1))
+    tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
+    plt.close('all')
+
+    tfr.plot_topo(picks=[1, 2])
+    plt.close('all')
+
+
+def test_add_channels():
+    """Test tfr splitting / re-appending channel types
+    """
+    data = np.zeros((6, 2, 3))
+    times = np.array([.1, .2, .3])
+    freqs = np.array([.10, .20])
+    info = mne.create_info(
+        ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
+        1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
+    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
+                     nave=20, comment='test', method='crazy-tfr')
+    tfr_eeg = tfr.pick_types(meg=False, eeg=True, copy=True)
+    tfr_meg = tfr.pick_types(meg=True, copy=True)
+    tfr_stim = tfr.pick_types(meg=False, stim=True, copy=True)
+    tfr_eeg_meg = tfr.pick_types(meg=True, eeg=True, copy=True)
+    tfr_new = tfr_meg.add_channels([tfr_eeg, tfr_stim], copy=True)
+    assert_true(all(ch in tfr_new.ch_names
+                    for ch in tfr_stim.ch_names + tfr_meg.ch_names))
+    tfr_new = tfr_meg.add_channels([tfr_eeg], copy=True)
+
+    assert_true(all(ch in tfr_new.ch_names for ch in tfr.ch_names))
+    assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
+    assert_true(all(ch not in tfr_new.ch_names
+                    for ch in tfr_stim.ch_names))
+
+    # Now test errors
+    tfr_badsf = tfr_eeg.copy()
+    tfr_badsf.info['sfreq'] = 3.1415927
+    tfr_eeg = tfr_eeg.crop(-.1, .1)
+
+    assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
+    assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
+    assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
+    assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
+
+
+run_tests_if_main()
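
AverageTFR now round-trips through HDF5 (requires h5py). A sketch
following the IO test, with tfr as constructed there; file names keep
the '-tfr.h5' convention:

    from mne.time_frequency.tfr import read_tfrs, write_tfrs

    tfr.save('test-tfr.h5')              # condition taken from comment
    tfr2 = read_tfrs('test-tfr.h5', condition='test')
    tfr2.comment = 'test-B'              # conditions must be distinct
    write_tfrs('test2-tfr.h5', [tfr, tfr2])
    tfrs = read_tfrs('test2-tfr.h5', condition=None)  # list of AverageTFR
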
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
index 7653eed..4623877 100644
--- a/mne/time_frequency/tfr.py
+++ b/mne/time_frequency/tfr.py
@@ -1,12 +1,13 @@
-"""A module which implements the continuous wavelet transform
-with complex Morlet wavelets.
+"""A module which implements the time frequency estimation.
 
-Author : Alexandre Gramfort, alexandre.gramfort at telecom-paristech.fr (2011)
-License : BSD 3-clause
-
-inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
+Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
 """
+# Authors : Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#           Hari Bharadwaj <hari at nmr.mgh.harvard.edu>
+#
+# License : BSD (3-clause)
 
+import warnings
 from math import sqrt
 from copy import deepcopy
 import numpy as np
@@ -16,18 +17,37 @@ from scipy.fftpack import fftn, ifftn
 from ..fixes import partial
 from ..baseline import rescale
 from ..parallel import parallel_func
-from ..utils import logger, verbose
-from ..channels import ContainsMixin, PickDropChannelsMixin
+from ..utils import logger, verbose, _time_mask
+from ..channels.channels import ContainsMixin, UpdateChannelsMixin
 from ..io.pick import pick_info, pick_types
-from ..utils import deprecated
+from ..io.meas_info import Info
+from ..utils import check_fname
+from .multitaper import dpss_windows
+from ..viz.utils import figure_nobar
+from ..externals.h5io import write_hdf5, read_hdf5
+
+
+def _get_data(inst, return_itc):
+    """Get data from Epochs or Evoked instance as epochs x ch x time"""
+    from ..epochs import _BaseEpochs
+    from ..evoked import Evoked
+    if not isinstance(inst, (_BaseEpochs, Evoked)):
+        raise TypeError('inst must be Epochs or Evoked')
+    if isinstance(inst, _BaseEpochs):
+        data = inst.get_data()
+    else:
+        if return_itc:
+            raise ValueError('return_itc must be False for evoked data')
+        data = inst.data[np.newaxis, ...].copy()
+    return data
 
 
-def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
+def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
     """Compute Wavelets for the given frequency range
 
     Parameters
     ----------
-    Fs : float
+    sfreq : float
         Sampling Frequency
     freqs : array
         frequency range of interest (1 x Frequencies)
@@ -48,9 +68,15 @@ def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
     -------
     Ws : list of array
         Wavelets time series
+
+    See Also
+    --------
+    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+                                    with Morlet wavelets
     """
     Ws = list()
     n_cycles = np.atleast_1d(n_cycles)
+
     if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
         raise ValueError("n_cycles should be fixed or defined for "
                          "each frequency.")
@@ -66,7 +92,7 @@ def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
             sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
         # this scaling factor is proportional to (Tallon-Baudry 98):
         # (sigma_t*sqrt(pi))^(-1/2);
-        t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
+        t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
         t = np.r_[-t[::-1], t[1:]]
         oscillation = np.exp(2.0 * 1j * np.pi * f * t)
         gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
@@ -79,6 +105,70 @@ def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
     return Ws
 
 
+def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
+                  zero_mean=False):
+    """Compute Wavelets for the given frequency range
+
+    Parameters
+    ----------
+    sfreq : float
+        Sampling Frequency.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+        Defaults to 7.
+    time_bandwidth : float, (optional)
+        Time x Bandwidth product.
+        The number of good tapers (low-bias) is chosen automatically based on
+        this to equal floor(time_bandwidth - 1).
+        Default is 4.0, giving 3 good tapers.
+
+    Returns
+    -------
+    Ws : list of array
+        Wavelets time series
+    """
+    Ws = list()
+    if time_bandwidth < 2.0:
+        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
+    n_taps = int(np.floor(time_bandwidth - 1))
+    n_cycles = np.atleast_1d(n_cycles)
+
+    if n_cycles.size != 1 and n_cycles.size != len(freqs):
+        raise ValueError("n_cycles should be fixed or defined for "
+                         "each frequency.")
+
+    for m in range(n_taps):
+        Wm = list()
+        for k, f in enumerate(freqs):
+            if len(n_cycles) != 1:
+                this_n_cycles = n_cycles[k]
+            else:
+                this_n_cycles = n_cycles[0]
+
+            t_win = this_n_cycles / float(f)
+            t = np.arange(0., t_win, 1.0 / sfreq)
+            # Making sure wavelets are centered before tapering
+            oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
+
+            # Get dpss tapers
+            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
+                                        n_taps)
+
+            Wk = oscillation * tapers[m]
+            if zero_mean:  # to make it zero mean
+                real_offset = Wk.mean()
+                Wk -= real_offset
+            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
+
+            Wm.append(Wk)
+
+        Ws.append(Wm)
+
+    return Ws
+
+
 def _centered(arr, newsize):
     """Aux Function to center data"""
     # Return the center newsize portion of the array.
@@ -157,14 +247,17 @@ def _cwt_convolve(X, Ws, mode='same'):
         yield tfr
 
 
-def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
+def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
     """Compute time freq decomposition with Morlet wavelets
 
+    This function operates directly on numpy arrays. Consider using
+    `tfr_morlet` to process `Epochs` or `Evoked` instances.
+
     Parameters
     ----------
     X : array of shape [n_signals, n_times]
         signals (one per row)
-    Fs : float
+    sfreq : float
         sampling Frequency
     freqs : array
         Array of frequencies of interest
@@ -179,6 +272,10 @@ def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
     -------
     tfr : 3D array
         Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+    See Also
+    --------
+    tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
     """
     mode = 'same'
     # mode = "valid"
@@ -186,7 +283,7 @@ def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
     n_frequencies = len(freqs)
 
     # Precompute wavelets for given frequency range to save time
-    Ws = morlet(Fs, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
+    Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
 
     if use_fft:
         coefs = _cwt_fft(X, Ws, mode)
@@ -220,6 +317,11 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1):
     -------
     tfr : 3D array
         Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+    See Also
+    --------
+    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+                                    with Morlet wavelets
     """
     n_signals, n_times = X[:, ::decim].shape
     n_frequencies = len(Ws)
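
With the Fs -> sfreq rename, the array-level entry points above can be
exercised like this (shapes and frequencies are arbitrary):

    import numpy as np
    from mne.time_frequency.tfr import morlet, cwt_morlet, cwt

    sfreq = 1000.
    freqs = np.arange(6, 20, 5)
    X = np.random.randn(2, 1000)  # 2 signals, 1 s at sfreq
    tfr = cwt_morlet(X, sfreq, freqs, n_cycles=freqs / 4.)
    # equivalently, precompute the wavelets and use the lower-level cwt
    Ws = morlet(sfreq, freqs, n_cycles=freqs / 4.)
    tfr2 = cwt(X, Ws, use_fft=True)
    # both are complex arrays of shape (2, len(freqs), 1000)
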
@@ -236,13 +338,14 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1):
     return tfrs
 
 
-def _time_frequency(X, Ws, use_fft):
+def _time_frequency(X, Ws, use_fft, decim):
     """Aux of time_frequency for parallel computing over channels
     """
     n_epochs, n_times = X.shape
+    n_times = n_times // decim + bool(n_times % decim)
     n_frequencies = len(Ws)
     psd = np.zeros((n_frequencies, n_times))  # PSD
-    plf = np.zeros((n_frequencies, n_times), dtype=np.complex)  # phase lock
+    plf = np.zeros((n_frequencies, n_times), np.complex)  # phase lock
 
     mode = 'same'
     if use_fft:
@@ -251,15 +354,17 @@ def _time_frequency(X, Ws, use_fft):
         tfrs = _cwt_convolve(X, Ws, mode)
 
     for tfr in tfrs:
+        tfr = tfr[:, ::decim]
         tfr_abs = np.abs(tfr)
         psd += tfr_abs ** 2
         plf += tfr / tfr_abs
-
+    psd /= n_epochs
+    plf = np.abs(plf) / n_epochs
     return psd, plf
 
 
 @verbose
-def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
+def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
                        baseline=None, baseline_mode='ratio', times=None,
                        decim=1, n_jobs=1, zero_mean=False, verbose=None):
     """Compute time-frequency power on single epochs
@@ -268,7 +373,7 @@ def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     ----------
     data : array of shape [n_epochs, n_channels, n_times]
         The epochs
-    Fs : float
+    sfreq : float
         Sampling rate
     frequencies : array-like
         The frequencies
@@ -311,7 +416,7 @@ def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     n_epochs, n_channels, n_times = data[:, :, ::decim].shape
 
     # Precompute wavelets for given frequency range to save time
-    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
 
     parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
 
@@ -325,12 +430,13 @@ def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
     if n_jobs == 1:
         for k, e in enumerate(data):
-            power[k] = np.abs(cwt(e, **cwt_kw)) ** 2
+            x = cwt(e, **cwt_kw)
+            power[k] = (x * x.conj()).real
     else:
         # Precompute tf decompositions in parallel
         tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
         for k, tfr in enumerate(tfrs):
-            power[k] = np.abs(tfr) ** 2
+            power[k] = (tfr * tfr.conj()).real
 
     # Run baseline correction.  Be sure to decimate the times array as well if
     # needed.
@@ -340,8 +446,8 @@ def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     return power
 
 
-def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
-                   decim=1, n_jobs=1, zero_mean=False):
+def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
+                       decim=1, n_jobs=1, zero_mean=False):
     """Compute time induced power and inter-trial phase-locking factor
 
     The time frequency decomposition is done with Morlet wavelets
@@ -350,7 +456,7 @@ def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     ----------
     data : array
         3D array of shape [n_epochs, n_channels, n_times]
-    Fs : float
+    sfreq : float
         sampling Frequency
     frequencies : array
         Array of frequencies of interest
@@ -379,75 +485,19 @@ def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
     n_epochs, n_channels, n_times = data[:, :, ::decim].shape
 
     # Precompute wavelets for given frequency range to save time
-    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
-
-    if n_jobs == 1:
-        psd = np.empty((n_channels, n_frequencies, n_times))
-        plf = np.empty((n_channels, n_frequencies, n_times), dtype=np.complex)
-
-        for c in range(n_channels):
-            X = data[:, c, :]
-            this_psd, this_plf = _time_frequency(X, Ws, use_fft)
-            psd[c], plf[c] = this_psd[:, ::decim], this_plf[:, ::decim]
-    else:
-        parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
-
-        psd_plf = parallel(my_time_frequency(np.squeeze(data[:, c, :]),
-                                             Ws, use_fft)
-                           for c in range(n_channels))
-
-        psd = np.zeros((n_channels, n_frequencies, n_times))
-        plf = np.zeros((n_channels, n_frequencies, n_times), dtype=np.complex)
-        for c, (psd_c, plf_c) in enumerate(psd_plf):
-            psd[c, :, :], plf[c, :, :] = psd_c[:, ::decim], plf_c[:, ::decim]
-
-    psd /= n_epochs
-    plf = np.abs(plf) / n_epochs
+    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+    psd = np.empty((n_channels, n_frequencies, n_times))
+    plf = np.empty((n_channels, n_frequencies, n_times))
+    # Separate to save memory for n_jobs=1
+    parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
+    psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
+                       for c in range(n_channels))
+    for c, (psd_c, plf_c) in enumerate(psd_plf):
+        psd[c, :, :], plf[c, :, :] = psd_c, plf_c
     return psd, plf
 
 
-@deprecated("induced_power will be removed in release 0.9. Use "
-            "tfr_morlet instead.")
-def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
-                  decim=1, n_jobs=1, zero_mean=False):
-    """Compute time induced power and inter-trial phase-locking factor
-
-    The time frequency decomposition is done with Morlet wavelets
-
-    Parameters
-    ----------
-    data : array
-        3D array of shape [n_epochs, n_channels, n_times]
-    Fs : float
-        sampling Frequency
-    frequencies : array
-        Array of frequencies of interest
-    use_fft : bool
-        Compute transform with fft based convolutions or temporal
-        convolutions.
-    n_cycles : float | array of float
-        Number of cycles. Fixed number or one per frequency.
-    decim: int
-        Temporal decimation factor
-    n_jobs : int
-        The number of CPUs used in parallel. All CPUs are used in -1.
-        Requires joblib package.
-    zero_mean : bool
-        Make sure the wavelets are zero mean.
-
-    Returns
-    -------
-    power : 2D array
-        Induced power (Channels x Frequencies x Timepoints).
-        Squared amplitude of time-frequency coefficients.
-    phase_lock : 2D array
-        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
-    """
-    return _induced_power(data, Fs, frequencies, use_fft=use_fft,
-                          n_cycles=n_cycles, decim=decim, n_jobs=n_jobs,
-                          zero_mean=zero_mean)
-
-
 def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                  baseline, vmin, vmax, dB):
     """Aux Function to prepare tfr computation"""
@@ -460,19 +510,21 @@ def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
 
     # crop time
     itmin, itmax = None, None
+    idx = np.where(_time_mask(times, tmin, tmax))[0]
     if tmin is not None:
-        itmin = np.where(times >= tmin)[0][0]
+        itmin = idx[0]
     if tmax is not None:
-        itmax = np.where(times <= tmax)[0][-1]
+        itmax = idx[-1] + 1
 
     times = times[itmin:itmax]
 
     # crop freqs
     ifmin, ifmax = None, None
+    idx = np.where(_time_mask(freqs, fmin, fmax))[0]
     if fmin is not None:
-        ifmin = np.where(freqs >= fmin)[0][0]
+        ifmin = idx[0]
     if fmax is not None:
-        ifmax = np.where(freqs <= fmax)[0][-1]
+        ifmax = idx[-1] + 1
 
     freqs = freqs[ifmin:ifmax]
 
@@ -481,14 +533,13 @@ def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
 
     times *= 1e3
     if dB:
-        data = 20 * np.log10(data)
+        data = 10 * np.log10((data * data.conj()).real)
 
     vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
     return data, times, freqs, vmin, vmax
 
 
-# XXX : todo IO of TFRs
-class AverageTFR(ContainsMixin, PickDropChannelsMixin):
+class AverageTFR(ContainsMixin, UpdateChannelsMixin):
     """Container for Time-Frequency data
 
     Can for example store induced power at sensor level or intertrial
@@ -506,6 +557,14 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         The frequencies in Hz.
     nave : int
         The number of averaged TFRs.
+    comment : str | None
+        Comment on the data, e.g., the experimental condition.
+        Defaults to None.
+    method : str | None
+        Comment on the method used to compute the data, e.g., Morlet wavelet.
+        Defaults to None.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Attributes
     ----------
@@ -513,7 +572,8 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         The names of the channels.
     """
     @verbose
-    def __init__(self, info, data, times, freqs, nave, verbose=None):
+    def __init__(self, info, data, times, freqs, nave, comment=None,
+                 method=None, verbose=None):
         self.info = info
         if data.ndim != 3:
             raise ValueError('data should be 3d. Got %d.' % data.ndim)
@@ -531,20 +591,41 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         self.times = times
         self.freqs = freqs
         self.nave = nave
+        self.comment = comment
+        self.method = method
 
     @property
     def ch_names(self):
         return self.info['ch_names']
 
+    def crop(self, tmin=None, tmax=None, copy=False):
+        """Crop data to a given time interval
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        copy : bool
+            If False, the TFR instance is cropped in place.
+        """
+        inst = self if not copy else self.copy()
+        mask = _time_mask(inst.times, tmin, tmax)
+        inst.times = inst.times[mask]
+        inst.data = inst.data[..., mask]
+        return inst
+
     @verbose
-    def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
-             fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
-             dB=False, colorbar=True, show=True, verbose=None):
+    def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
+             tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+             cmap='RdBu_r', dB=False, colorbar=True, show=True,
+             title=None, axes=None, layout=None, verbose=None):
         """Plot TFRs in a topography with images
 
         Parameters
         ----------
-        picks : array-like of int
+        picks : array-like of int | None
             The indices of the channels to plot.
         baseline : None (default) or tuple of length 2
             The time interval to apply baseline correction.
@@ -578,49 +659,121 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         vmax : float | None
             The maximum value on the color scale. If vmax is None, the data
             maximum value is used.
-        layout : Layout | None
-            Layout instance specifying sensor positions. If possible, the
-            correct layout is inferred from the data.
         cmap : matplotlib colormap | str
             The colormap to use. Defaults to 'RdBu_r'.
         dB : bool
             If True, 20*log10 is applied to the data to get dB.
         colorbar : bool
-            If true, colorbar will be added to the plot
-        layout_scale : float
-            Scaling factor for adjusting the relative size of the layout
-            on the canvas
+            If True, a colorbar will be added to the plot. For user-defined
+            axes, the colorbar cannot be drawn. Defaults to True.
         show : bool
             Call pyplot.show() at the end.
+        title : str | None
+            String for title. Defaults to None (blank/no title).
+        axes : instance of Axes | list | None
+            The axes to plot to. If list, the list must be a list of Axes of
+            the same length as the number of channels. If instance of Axes,
+            there must be only one channel plotted.
+        layout : Layout | None
+            Layout instance specifying sensor positions. Used for interactive
+            plotting of topographies on rectangle selection. If possible, the
+            correct layout is inferred from the data.
         verbose : bool, str, int, or None
             If not None, override default verbose level (see mne.verbose).
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
         """
         from ..viz.topo import _imshow_tfr
         import matplotlib.pyplot as plt
         times, freqs = self.times.copy(), self.freqs.copy()
-        data = self.data[picks]
+        info = self.info
+        data = self.data
+
+        n_picks = len(picks)
+        info, data, picks = _prepare_picks(info, data, picks)
+        data = data[picks]
 
         data, times, freqs, vmin, vmax = \
             _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                          baseline, vmin, vmax, dB)
 
         tmin, tmax = times[0], times[-1]
-
-        for k, p in zip(range(len(data)), picks):
-            plt.figure()
-            _imshow_tfr(plt, 0, tmin, tmax, vmin, vmax, ylim=None,
-                        tfr=data[k: k + 1], freq=freqs, x_label='Time (ms)',
-                        y_label='Frequency (Hz)', colorbar=colorbar,
-                        picker=False, cmap=cmap)
-
+        if isinstance(axes, plt.Axes):
+            axes = [axes]
+        if isinstance(axes, list) or isinstance(axes, np.ndarray):
+            if len(axes) != n_picks:
+                raise RuntimeError('There must be an axes for each picked '
+                                   'channel.')
+
+        for idx in range(len(data)):
+            if axes is None:
+                fig = plt.figure()
+                ax = fig.add_subplot(111)
+            else:
+                ax = axes[idx]
+                fig = ax.get_figure()
+            onselect_callback = partial(self._onselect, baseline=baseline,
+                                        mode=mode, layout=layout)
+            _imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
+                        ylim=None, tfr=data[idx: idx + 1], freq=freqs,
+                        x_label='Time (ms)', y_label='Frequency (Hz)',
+                        colorbar=colorbar, picker=False, cmap=cmap)
+            if title:
+                fig.suptitle(title)
+            colorbar = False  # only one colorbar for multiple axes
         if show:
-            import matplotlib.pyplot as plt
             plt.show()
+        return fig
+
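A minimal usage sketch for the reworked plot() above; `power` is an assumed,
pre-computed AverageTFR instance (e.g. from tfr_morlet) and the channel
indices are illustrative:

    import matplotlib.pyplot as plt

    picks = [0, 1]  # two illustrative channel indices
    fig, axes = plt.subplots(1, len(picks))  # one Axes per picked channel
    power.plot(picks=picks, baseline=(None, 0), mode='mean',
               axes=list(axes), title='Induced power')

Passing a single Axes also works, provided exactly one channel is picked.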
+    def _onselect(self, eclick, erelease, baseline, mode, layout):
+        """Callback function called by rubber band selector in channel tfr."""
+        import matplotlib.pyplot as plt
+        from ..viz import plot_tfr_topomap
+        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
+            return
+        plt.ion()  # turn interactive mode on
+        tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5)  # ms to s
+        tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
+        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
+        fmax = round(max(eclick.ydata, erelease.ydata), 5)
+        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
+        tmax = min(self.times, key=lambda x: abs(x - tmax))
+        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
+        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
+        if tmin == tmax or fmin == fmax:
+            logger.info('The selected area is too small. '
+                        'Select a larger time-frequency window.')
+            return
+
+        types = list()
+        if 'eeg' in self:
+            types.append('eeg')
+        if 'mag' in self:
+            types.append('mag')
+        if 'grad' in self:
+            types.append('grad')
+        fig = figure_nobar()
+        fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
+                                                                         tmax,
+                                                                         fmin,
+                                                                         fmax),
+                     y=0.04)
+        for idx, ch_type in enumerate(types):
+            ax = plt.subplot(1, len(types), idx + 1)
+            plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
+                             fmin=fmin, fmax=fmax, layout=layout,
+                             baseline=baseline, mode=mode, cmap=None,
+                             title=ch_type, vmin=None, vmax=None,
+                             axes=ax)
 
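The nearest-sample snapping in _onselect uses only the builtin min() with a
key function; a self-contained illustration with made-up values:

    times = [0.0, 0.1, 0.2, 0.3]
    target = 0.17
    nearest = min(times, key=lambda t: abs(t - target))
    assert nearest == 0.2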
     def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                   tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                   layout=None, cmap='RdBu_r', title=None, dB=False,
-                  colorbar=True, layout_scale=0.945, show=True):
+                  colorbar=True, layout_scale=0.945, show=True,
+                  border='none', fig_facecolor='k', font_color='w'):
         """Plot TFRs in a topography with images
 
         Parameters
@@ -676,37 +829,48 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
             on the canvas.
         show : bool
             Call pyplot.show() at the end.
-        verbose : bool, str, int, or None
-            If not None, override default verbose level (see mne.verbose).
+        border : str
+            Matplotlib border style to be used for each sensor plot.
+        fig_facecolor : str | obj
+            The figure face color. Defaults to black.
+        font_color : str | obj
+            The color of tick labels in the colorbar. Defaults to white.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure containing the topography.
         """
         from ..viz.topo import _imshow_tfr, _plot_topo
+        import matplotlib.pyplot as plt
         times = self.times.copy()
         freqs = self.freqs
         data = self.data
         info = self.info
 
-        if picks is not None:
-            data = data[picks]
-            info = pick_info(info, picks)
+        info, data, picks = _prepare_picks(info, data, picks)
+        data = data[picks]
 
         data, times, freqs, vmin, vmax = \
             _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                          mode, baseline, vmin, vmax, dB)
 
         if layout is None:
-            from mne.layouts.layout import find_layout
+            from mne import find_layout
             layout = find_layout(self.info)
-
-        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
-
-        fig = _plot_topo(info=info, times=times,
-                         show_func=imshow, layout=layout,
-                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                         layout_scale=layout_scale, title=title, border='w',
-                         x_label='Time (ms)', y_label='Frequency (Hz)')
+        onselect_callback = partial(self._onselect, baseline=baseline,
+                                    mode=mode, layout=layout)
+        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
+                         onselect=onselect_callback)
+
+        fig = _plot_topo(info=info, times=times, show_func=imshow,
+                         layout=layout, colorbar=colorbar, vmin=vmin,
+                         vmax=vmax, cmap=cmap, layout_scale=layout_scale,
+                         title=title, border=border, x_label='Time (ms)',
+                         y_label='Frequency (Hz)', fig_facecolor=fig_facecolor,
+                         font_color=font_color)
 
         if show:
-            import matplotlib.pyplot as plt
             plt.show()
 
         return fig
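A sketch of the new styling hooks on plot_topo(); `power` is again an assumed
AverageTFR instance, and the white-background combination merely illustrates
the parameters:

    fig = power.plot_topo(baseline=(None, 0), mode='zscore',
                          fig_facecolor='w', font_color='k',
                          border='none', title='Average power')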
@@ -746,7 +910,7 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         s = "time : [%f, %f]" % (self.times[0], self.times[-1])
         s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
         s += ", nave : %d" % self.nave
-        s += ', channels : %d' % self.data.shape[1]
+        s += ', channels : %d' % self.data.shape[0]
         return "<AverageTFR  |  %s>" % s
 
     def apply_baseline(self, baseline, mode='mean'):
@@ -772,17 +936,15 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         self.data = rescale(self.data, self.times, baseline, mode, copy=False)
 
     def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
-                     ch_type='mag', baseline=None, mode='mean',
-                     layout=None, vmin=None, vmax=None, cmap='RdBu_r',
-                     sensors='k,', colorbar=True, unit=None, res=64, size=2,
-                     format='%1.1e', show_names=False, title=None,
-                     axes=None, show=True):
+                     ch_type=None, baseline=None, mode='mean',
+                     layout=None, vmin=None, vmax=None, cmap=None,
+                     sensors=True, colorbar=True, unit=None, res=64, size=2,
+                     cbar_fmt='%1.1e', show_names=False, title=None,
+                     axes=None, show=True, outlines='head', head_pos=None):
         """Plot topographic maps of time-frequency intervals of TFR data
 
         Parameters
         ----------
-        tfr : AvereageTFR
-            The AvereageTFR object.
         tmin : None | float
             The first time instant to display. If None the first time point
             available is used.
@@ -795,9 +957,10 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
         fmax : None | float
             The last frequency to display. If None the last frequency
             available is used.
-        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
             The channel type to plot. For 'grad', the gradiometers are
             collected in pairs and the RMS for each pair is plotted.
+            If None, then channels are chosen in the order given above.
         baseline : tuple or list of length 2
             The time interval to apply rescaling / baseline correction.
             If None do not apply it. If baseline is (a, b)
@@ -818,30 +981,33 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
             file is inferred from the data; if no appropriate layout file was
             found, the layout is automatically generated from the sensor
             locations.
-        vmin : float | callable
-            The value specfying the lower bound of the color range.
-            If None, and vmax is None, -vmax is used. Else np.min(data).
-            If callable, the output equals vmin(data).
-        vmax : float | callable
-            The value specfying the upper bound of the color range.
-            If None, the maximum absolute value is used. If vmin is None,
-            but vmax is not, defaults to np.min(data).
-            If callable, the output equals vmax(data).
-        cmap : matplotlib colormap
-            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
-            'Reds'.
+        vmin : float | callable | None
+            The value specifying the lower bound of the color range. If None
+            and vmax is None, -vmax is used; otherwise np.min(data), or 0 if
+            the data contain only positive values. If callable, the output
+            equals vmin(data). Defaults to None.
+        vmax : float | callable | None
+            The value specifying the upper bound of the color range. If None,
+            the maximum value is used. If callable, the output equals
+            vmax(data). Defaults to None.
+        cmap : matplotlib colormap | None
+            Colormap. If None and the plotted data are all positive, defaults
+            to 'Reds'. If None and the data contain negative values, defaults
+            to 'RdBu_r'. Defaults to None.
         sensors : bool | str
             Add markers for sensor locations to the plot. Accepts matplotlib
-            plot format string (e.g., 'r+' for red plusses).
+            plot format string (e.g., 'r+' for red plusses). If True, a circle
+            will be used (via .add_artist). Defaults to True.
         colorbar : bool
             Plot a colorbar.
-        unit : str | None
-            The unit of the channel type used for colorbar labels.
+        unit : dict | str | None
+            The unit of the channel type used for the colorbar label. If
+            None, the unit is determined automatically.
         res : int
             The resolution of the topomap image (n pixels along each side).
         size : float
             Side length per topomap in inches.
-        format : str
+        cbar_fmt : str
             String format for colorbar values.
         show_names : bool | callable
             If True, show channel names on top of the map. If a callable is
@@ -855,6 +1021,22 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
             The axes to plot to. If None the axes is defined automatically.
         show : bool
             Call pyplot.show() at the end.
+        outlines : 'head' | 'skirt' | dict | None
+            The outlines to be drawn. If 'head', the default head scheme will
+            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
+            allowed to be plotted outside of the head circle. If dict, each key
+            refers to a tuple of x and y positions, the values in 'mask_pos'
+            will serve as image mask, and the 'autoshrink' (bool) field will
+            trigger automated shrinking of the positions due to points outside
+            the outline. Alternatively, a matplotlib patch object can be passed
+            for advanced masking options, either directly or as a function that
+            returns patches (required for multi-axis plots). If None, nothing
+            will be drawn. Defaults to 'head'.
+        head_pos : dict | None
+            If None (default), the sensors are positioned such that they span
+            the head circle. If dict, can have entries 'center' (tuple) and
+            'scale' (tuple) for what the center and scale of the head should be
+            relative to the electrode locations.
 
         Returns
         -------
@@ -866,19 +1048,118 @@ class AverageTFR(ContainsMixin, PickDropChannelsMixin):
                                 fmax=fmax, ch_type=ch_type, baseline=baseline,
                                 mode=mode, layout=layout, vmin=vmin, vmax=vmax,
                                 cmap=cmap, sensors=sensors, colorbar=colorbar,
-                                unit=unit, res=res, size=size, format=format,
-                                show_names=show_names, title=title, axes=axes,
-                                show=show)
+                                unit=unit, res=res, size=size,
+                                cbar_fmt=cbar_fmt, show_names=show_names,
+                                title=title, axes=axes, show=show,
+                                outlines=outlines, head_pos=head_pos)
+
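A sketch of the updated plot_topomap() with the renamed cbar_fmt and the new
outlines argument; the alpha-band limits and time window are illustrative:

    power.plot_topomap(tmin=0.5, tmax=1.5, fmin=8., fmax=12.,
                       ch_type='grad', baseline=(None, 0),
                       mode='logratio', outlines='head', cbar_fmt='%3.1f')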
+    def save(self, fname, overwrite=False):
+        """Save TFR object to hdf5 file
+
+        Parameters
+        ----------
+        fname : str
+            The file name, which should end with -tfr.h5.
+        overwrite : bool
+            If True, overwrite the file if it exists. Defaults to False.
+        """
+        write_tfrs(fname, self, overwrite=overwrite)
+
+
+def _prepare_write_tfr(tfr, condition):
+    """Aux function"""
+    return (condition, dict(times=tfr.times, freqs=tfr.freqs,
+                            data=tfr.data, info=tfr.info, nave=tfr.nave,
+                            comment=tfr.comment, method=tfr.method))
+
+
+def write_tfrs(fname, tfr, overwrite=False):
+    """Write a TFR dataset to hdf5.
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -tfr.h5.
+    tfr : AverageTFR instance, or list of AverageTFR instances
+        The TFR dataset, or list of TFR datasets, to save in one file.
+        Note: if .comment is None, a name will be generated on the fly,
+        based on the order in which the TFR objects are passed.
+    overwrite : bool
+        If True, overwrite file (if it exists). Defaults to False.
+
+    See Also
+    --------
+    read_tfrs
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    out = []
+    if not isinstance(tfr, (list, tuple)):
+        tfr = [tfr]
+    for ii, tfr_ in enumerate(tfr):
+        comment = ii if tfr_.comment is None else tfr_.comment
+        out.append(_prepare_write_tfr(tfr_, condition=comment))
+    write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
+
+
+def read_tfrs(fname, condition=None):
+    """
+    Read TFR datasets from hdf5 file.
+
+    Parameters
+    ----------
+    fname : string
+        The file name, which should end with -tfr.h5.
+    condition : int or str | list of int or str | None
+        The condition to load. If None, all conditions will be returned.
+        Defaults to None.
+
+    See Also
+    --------
+    write_tfrs
+
+    Returns
+    -------
+    tfrs : list of instances of AverageTFR | instance of AverageTFR
+        Depending on `condition` either the TFR object or a list of multiple
+        TFR objects.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+
+    check_fname(fname, 'tfr', ('-tfr.h5',))
+
+    logger.info('Reading %s ...' % fname)
+    tfr_data = read_hdf5(fname, title='mnepython')
+    for k, tfr in tfr_data:
+        tfr['info'] = Info(tfr['info'])
+
+    if condition is not None:
+        tfr_dict = dict(tfr_data)
+        if condition not in tfr_dict:
+            keys = ['%s' % k for k in tfr_dict]
+            raise ValueError('Cannot find condition ("{0}") in this file. '
+                             'I can give you "{1}"'
+                             .format(condition, " or ".join(keys)))
+        out = AverageTFR(**tfr_dict[condition])
+    else:
+        out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
+    return out
 
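A round-trip sketch for the new HDF5 I/O, assuming `power` is an AverageTFR
whose .comment is None (so it is stored under its list index):

    power.save('induced-tfr.h5', overwrite=True)       # delegates to write_tfrs
    tfrs = read_tfrs('induced-tfr.h5')                 # list of AverageTFR
    power2 = read_tfrs('induced-tfr.h5', condition=0)  # a single condition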
 
-def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
-               return_itc=True, decim=1, n_jobs=1):
+@verbose
+def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
+               return_itc=True, decim=1, n_jobs=1, picks=None, verbose=None):
     """Compute Time-Frequency Representation (TFR) using Morlet wavelets
 
     Parameters
     ----------
-    epochs : Epochs
-        The epochs.
+    inst : Epochs | Evoked
+        The epochs or evoked object.
     freqs : ndarray, shape (n_freqs,)
         The frequencies in Hz.
     n_cycles : float | ndarray, shape (n_freqs,)
@@ -887,10 +1168,174 @@ def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
         The fft based convolution or not.
     return_itc : bool
         Return intertrial coherence (ITC) as well as averaged power.
+        Must be ``False`` for evoked data.
     decim : int
         The decimation factor on the time axis. To reduce memory usage.
     n_jobs : int
         The number of jobs to run in parallel.
+    picks : array-like of int | None
+        The indices of the channels to decompose. If None, all MEG and
+        EEG channels excluding bad channels are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : instance of AverageTFR
+        The averaged power.
+    itc : instance of AverageTFR
+        The intertrial coherence (ITC). Only returned if return_itc
+        is True.
+
+    See Also
+    --------
+    tfr_multitaper, tfr_stockwell
+    """
+    data = _get_data(inst, return_itc)
+    info = inst.info
+
+    info, data, picks = _prepare_picks(info, data, picks)
+    data = data[:, picks, :]
+
+    power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
+                                    frequencies=freqs,
+                                    n_cycles=n_cycles, n_jobs=n_jobs,
+                                    use_fft=use_fft, decim=decim,
+                                    zero_mean=True)
+    times = inst.times[::decim].copy()
+    nave = len(data)
+    out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
+    if return_itc:
+        out = (out, AverageTFR(info, itc, times, freqs, nave,
+                               method='morlet-itc'))
+    return out
+
+
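A sketch of calling tfr_morlet() with the new inst and picks arguments;
`epochs` is an assumed mne.Epochs instance and the frequency grid is
illustrative:

    import numpy as np

    freqs = np.arange(6., 30., 3.)  # 6-27 Hz
    n_cycles = freqs / 2.           # frequency-dependent window length
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            return_itc=True, decim=3)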
+def _prepare_picks(info, data, picks):
+    if picks is None:
+        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
+                           exclude='bads')
+    if np.array_equal(picks, np.arange(len(data))):
+        picks = slice(None)
+    else:
+        info = pick_info(info, picks)
+
+    return info, data, picks
+
+
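The slice(None) branch above avoids a fancy-indexing copy when every channel
is picked; a standalone numpy illustration of the distinction:

    import numpy as np

    data = np.zeros((10, 5))
    assert data[slice(None)].base is data        # basic slice: a view
    assert data[np.arange(10)].base is not data  # fancy indexing: a copy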
+@verbose
+def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
+                       use_fft=True, n_cycles=7, decim=1, n_jobs=1,
+                       zero_mean=True, verbose=None):
+    """Compute time induced power and inter-trial phase-locking factor
+
+    The time frequency decomposition is done with DPSS wavelets
+
+    Parameters
+    ----------
+    data : np.ndarray, shape (n_epochs, n_channels, n_times)
+        The input data.
+    sfreq : float
+        The sampling frequency.
+    frequencies : np.ndarray, shape (n_frequencies,)
+        Array of frequencies of interest.
+    time_bandwidth : float
+        Time x (Full) Bandwidth product.
+        The number of good tapers (low-bias) is chosen automatically based on
+        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
+    use_fft : bool
+        Compute transform with fft based convolutions or temporal
+        convolutions. Defaults to True.
+    n_cycles : float | np.ndarray shape (n_frequencies,)
+        Number of cycles. Fixed number or one per frequency. Defaults to 7.
+    decim : int
+        Temporal decimation factor. Defaults to 1.
+    n_jobs : int
+        The number of CPUs used in parallel. If -1, all CPUs are used.
+        Requires joblib package. Defaults to 1.
+    zero_mean : bool
+        Make sure the wavelets are zero mean. Defaults to True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
+        Induced power. Squared amplitude of time-frequency coefficients.
+    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
+        Phase locking value.
+    """
+    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+    logger.info('Data has %d trials and %d channels', n_epochs, n_channels)
+    n_frequencies = len(frequencies)
+    logger.info('Multitaper time-frequency analysis for %d frequencies',
+                n_frequencies)
+
+    # Precompute wavelets for given frequency range to save time
+    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
+                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
+    n_taps = len(Ws)
+    logger.info('Using %d tapers', n_taps)
+    n_times_wavelets = Ws[0][0].shape[0]
+    if n_times <= n_times_wavelets:
+        warnings.warn("Time windows are as long or longer than the epoch. "
+                      "Consider reducing n_cycles.")
+    psd = np.zeros((n_channels, n_frequencies, n_times))
+    itc = np.zeros((n_channels, n_frequencies, n_times))
+    parallel, my_time_frequency, _ = parallel_func(_time_frequency,
+                                                   n_jobs)
+    for m in range(n_taps):
+        psd_itc = parallel(my_time_frequency(data[:, c, :],
+                                             Ws[m], use_fft, decim)
+                           for c in range(n_channels))
+        for c, (psd_c, itc_c) in enumerate(psd_itc):
+            psd[c, :, :] += psd_c
+            itc[c, :, :] += itc_c
+    psd /= n_taps
+    itc /= n_taps
+    return psd, itc
+
+
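The taper count quoted in the docstring follows floor(time_bandwidth - 1); a
one-line sanity check of the stated default:

    import math

    time_bandwidth = 4.0                        # the documented default
    assert math.floor(time_bandwidth - 1) == 3  # -> 3 good tapers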
+@verbose
+def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
+                   use_fft=True, return_itc=True, decim=1, n_jobs=1,
+                   picks=None, verbose=None):
+    """Compute Time-Frequency Representation (TFR) using DPSS wavelets
+
+    Parameters
+    ----------
+    inst : Epochs | Evoked
+        The epochs or evoked object.
+    freqs : ndarray, shape (n_freqs,)
+        The frequencies in Hz.
+    n_cycles : float | ndarray, shape (n_freqs,)
+        The number of cycles globally or for each frequency.
+        The time-window length is thus T = n_cycles / freq.
+    time_bandwidth : float (optional)
+        Time x (Full) Bandwidth product. Should be >= 2.0.
+        Choose this along with n_cycles to get desired frequency resolution.
+        The number of good tapers (least leakage from far away frequencies)
+        is chosen automatically based on this to floor(time_bandwidth - 1).
+        Default is 4.0 (3 good tapers).
+        E.g., with freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
+        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
+    use_fft : bool
+        The fft based convolution or not.
+        Defaults to True.
+    return_itc : bool
+        Return intertrial coherence (ITC) as well as averaged power.
+        Defaults to True.
+    decim : int
+        The decimation factor on the time axis. To reduce memory usage.
+        Note that this is brute-force decimation; no anti-aliasing is done.
+        Defaults to 1.
+    n_jobs : int
+        The number of jobs to run in parallel. Defaults to 1.
+    picks : array-like of int | None
+        The indices of the channels to decompose. If None, all MEG and
+        EEG channels excluding bad channels are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
@@ -899,18 +1344,33 @@ def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
     itc : AverageTFR
         The intertrial coherence (ITC). Only returned if return_itc
         is True.
+
+    See Also
+    --------
+    tfr_morlet, tfr_stockwell
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
-    data = epochs.get_data()
-    picks = pick_types(epochs.info, meg=True, eeg=True)
-    info = pick_info(epochs.info, picks)
-    data = data[:, picks, :]
-    power, itc = _induced_power(data, Fs=info['sfreq'], frequencies=freqs,
-                                n_cycles=n_cycles, n_jobs=n_jobs,
-                                use_fft=use_fft, decim=decim,
-                                zero_mean=True)
-    times = epochs.times[::decim].copy()
+
+    data = _get_data(inst, return_itc)
+    info = inst.info
+
+    info, data, picks = _prepare_picks(info, data, picks)
+    data = data[:, picks, :]
+
+    power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
+                                    frequencies=freqs, n_cycles=n_cycles,
+                                    time_bandwidth=time_bandwidth,
+                                    use_fft=use_fft, decim=decim,
+                                    n_jobs=n_jobs, zero_mean=True,
+                                    verbose='INFO')
+    times = inst.times[::decim].copy()
     nave = len(data)
-    out = AverageTFR(info, power, times, freqs, nave)
+    out = AverageTFR(info, power, times, freqs, nave,
+                     method='multitaper-power')
     if return_itc:
-        out = (out, AverageTFR(info, itc, times, freqs, nave))
+        out = (out, AverageTFR(info, itc, times, freqs, nave,
+                               method='multitaper-itc'))
     return out
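The worked example in the tfr_multitaper docstring checks out numerically:

    freq, n_cycles, time_bandwidth = 20., 10., 4.  # values from the docstring
    t_win = n_cycles / freq                        # 0.5 s time window
    f_smooth = time_bandwidth / t_win              # 8.0 Hz frequency smoothing
    assert (t_win, f_smooth) == (0.5, 8.0)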
diff --git a/mne/transforms.py b/mne/transforms.py
index 38278ec..fdc405c 100644
--- a/mne/transforms.py
+++ b/mne/transforms.py
@@ -4,6 +4,7 @@
 # License: BSD (3-clause)
 
 import os
+from os import path as op
 import glob
 import numpy as np
 from numpy import sin, cos
@@ -13,7 +14,7 @@ from .io.constants import FIFF
 from .io.open import fiff_open
 from .io.tag import read_tag
 from .io.write import start_file, end_file, write_coord_trans
-from .utils import check_fname, logger
+from .utils import check_fname, logger, deprecated
 from .externals.six import string_types
 
 
@@ -25,33 +26,98 @@ als_ras_trans = np.array([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0],
 als_ras_trans_mm = als_ras_trans * [0.001, 0.001, 0.001, 1]
 
 
+_str_to_frame = dict(meg=FIFF.FIFFV_COORD_DEVICE,
+                     mri=FIFF.FIFFV_COORD_MRI,
+                     mri_voxel=FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
+                     head=FIFF.FIFFV_COORD_HEAD,
+                     mni_tal=FIFF.FIFFV_MNE_COORD_MNI_TAL,
+                     ras=FIFF.FIFFV_MNE_COORD_RAS,
+                     fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL,
+                     ctf_head=FIFF.FIFFV_MNE_COORD_CTF_HEAD,
+                     ctf_meg=FIFF.FIFFV_MNE_COORD_CTF_DEVICE)
+_frame_to_str = dict((val, key) for key, val in _str_to_frame.items())
+
+_verbose_frames = {FIFF.FIFFV_COORD_UNKNOWN: 'unknown',
+                   FIFF.FIFFV_COORD_DEVICE: 'MEG device',
+                   FIFF.FIFFV_COORD_ISOTRAK: 'isotrak',
+                   FIFF.FIFFV_COORD_HPI: 'hpi',
+                   FIFF.FIFFV_COORD_HEAD: 'head',
+                   FIFF.FIFFV_COORD_MRI: 'MRI (surface RAS)',
+                   FIFF.FIFFV_MNE_COORD_MRI_VOXEL: 'MRI voxel',
+                   FIFF.FIFFV_COORD_MRI_SLICE: 'MRI slice',
+                   FIFF.FIFFV_COORD_MRI_DISPLAY: 'MRI display',
+                   FIFF.FIFFV_MNE_COORD_CTF_DEVICE: 'CTF MEG device',
+                   FIFF.FIFFV_MNE_COORD_CTF_HEAD: 'CTF/4D/KIT head',
+                   FIFF.FIFFV_MNE_COORD_RAS: 'RAS (non-zero origin)',
+                   FIFF.FIFFV_MNE_COORD_MNI_TAL: 'MNI Talairach',
+                   FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ: 'Talairach (MNI z > 0)',
+                   FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ: 'Talairach (MNI z < 0)',
+                   -1: 'unknown'}
+
+
+def _to_const(cf):
+    """Helper to convert string or int coord frame into int"""
+    if isinstance(cf, string_types):
+        if cf not in _str_to_frame:
+            raise ValueError('Unknown cf %s' % cf)
+        cf = _str_to_frame[cf]
+    elif not isinstance(cf, int):
+        raise TypeError('cf must be str or int, not %s' % type(cf))
+    return cf
+
+
+class Transform(dict):
+    """A transform
+
+    Parameters
+    ----------
+    fro : str | int
+        The starting coordinate frame.
+    to : str | int
+        The ending coordinate frame.
+    trans : array-like, shape (4, 4)
+        The transformation matrix.
+    """
+    def __init__(self, fro, to, trans):
+        super(Transform, self).__init__()
+        # we could add some better sanity checks here
+        fro = _to_const(fro)
+        to = _to_const(to)
+        trans = np.asarray(trans, dtype=np.float64)
+        if trans.shape != (4, 4):
+            raise ValueError('Transformation must be shape (4, 4) not %s'
+                             % (trans.shape,))
+        self['from'] = fro
+        self['to'] = to
+        self['trans'] = trans
+
+    def __repr__(self):
+        return ('<Transform  |  %s->%s>\n%s'
+                % (_coord_frame_name(self['from']),
+                   _coord_frame_name(self['to']), self['trans']))
+
+    @property
+    def from_str(self):
+        return _coord_frame_name(self['from'])
+
+    @property
+    def to_str(self):
+        return _coord_frame_name(self['to'])
+
+
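A sketch of the new Transform class; because it subclasses dict, existing
code that indexes trans['from'], trans['to'] and trans['trans'] keeps
working:

    import numpy as np

    t = Transform('head', 'mri', np.eye(4))  # identity head->MRI transform
    print(t.from_str, '->', t.to_str)        # head -> MRI (surface RAS)
    assert t['trans'].shape == (4, 4)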
 def _coord_frame_name(cframe):
-    """Map integers to human-readable names"""
-    types = {FIFF.FIFFV_COORD_UNKNOWN: 'unknown',
-             FIFF.FIFFV_COORD_DEVICE: 'MEG device',
-             FIFF.FIFFV_COORD_ISOTRAK: 'isotrak',
-             FIFF.FIFFV_COORD_HPI: 'hpi',
-             FIFF.FIFFV_COORD_HEAD: 'head',
-             FIFF.FIFFV_COORD_MRI: 'MRI (surface RAS)',
-             FIFF.FIFFV_MNE_COORD_MRI_VOXEL: 'MRI voxel',
-             FIFF.FIFFV_COORD_MRI_SLICE: 'MRI slice',
-             FIFF.FIFFV_COORD_MRI_DISPLAY: 'MRI display',
-             FIFF.FIFFV_MNE_COORD_CTF_DEVICE: 'CTF MEG device',
-             FIFF.FIFFV_MNE_COORD_CTF_HEAD: 'CTF/4D/KIT head',
-             FIFF.FIFFV_MNE_COORD_RAS: 'RAS (non-zero origin)',
-             FIFF.FIFFV_MNE_COORD_MNI_TAL: 'MNI Talairach',
-             FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ: 'Talairach (MNI z > 0)',
-             FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ: 'Talairach (MNI z < 0)',
-             -1: 'unknown'}
-    return types.get(cframe, 'unknown')
+    """Map integers to human-readable (verbose) names"""
+    return _verbose_frames.get(int(cframe), 'unknown')
 
 
 def _print_coord_trans(t, prefix='Coordinate transformation: '):
     logger.info(prefix + '%s -> %s'
                 % (_coord_frame_name(t['from']), _coord_frame_name(t['to'])))
-    for tt in t['trans']:
-        logger.info('    % 8.6f % 8.6f % 8.6f    %7.2f mm' %
-                    (tt[0], tt[1], tt[2], 1000 * tt[3]))
+    for ti, tt in enumerate(t['trans']):
+        scale = 1000. if ti != 3 else 1.
+        text = ' mm' if ti != 3 else ''
+        logger.info('    % 8.6f % 8.6f % 8.6f    %7.2f%s' %
+                    (tt[0], tt[1], tt[2], scale * tt[3], text))
 
 
 def _find_trans(subject, subjects_dir=None):
@@ -77,7 +143,7 @@ def apply_trans(trans, pts, move=True):
 
     Parameters
     ----------
-    trans : array, shape = (4, 4)
+    trans : array, shape = (4, 4) | instance of Transform
         Transform matrix.
     pts : array, shape = (3,) | (n, 3)
         Array with coordinates for one or n points.
@@ -89,17 +155,15 @@ def apply_trans(trans, pts, move=True):
     transformed_pts : shape = (3,) | (n, 3)
         Transformed point(s).
     """
+    if isinstance(trans, dict):
+        trans = trans['trans']
     trans = np.asarray(trans)
     pts = np.asarray(pts)
     if pts.size == 0:
         return pts.copy()
 
     # apply rotation & scale
-    if pts.ndim == 1:
-        out_pts = np.dot(trans[:3, :3], pts)
-    else:
-        out_pts = np.dot(pts, trans[:3, :3].T)
-
+    out_pts = np.dot(pts, trans[:3, :3].T)
     # apply translation
     if move is True:
         transl = trans[:3, 3]
@@ -226,18 +290,83 @@ def translation(x=0, y=0, z=0):
     return m
 
 
-def _get_mri_head_t_from_trans_file(fname):
-    """Helper to convert "-trans.txt" to "-trans.fif" mri-type equivalent"""
-    # Read a Neuromag -> FreeSurfer transformation matrix
-    t = np.genfromtxt(fname)
-    if t.ndim != 2 or t.shape != (4, 4):
-        raise RuntimeError('File "%s" did not have 4x4 entries' % fname)
-    t = {'from': FIFF.FIFFV_COORD_HEAD, 'to': FIFF.FIFFV_COORD_MRI, 'trans': t}
-    return invert_transform(t)
+def _ensure_trans(trans, fro='mri', to='head'):
+    """Helper to ensure we have the proper transform"""
+    if isinstance(fro, string_types):
+        from_str = fro
+        from_const = _str_to_frame[fro]
+    else:
+        from_str = _frame_to_str[fro]
+        from_const = fro
+    del fro
+    if isinstance(to, string_types):
+        to_str = to
+        to_const = _str_to_frame[to]
+    else:
+        to_str = _frame_to_str[to]
+        to_const = to
+    del to
+    err_str = 'trans must go %s<->%s, provided' % (from_str, to_str)
+    if trans is None:
+        raise ValueError('%s None' % err_str)
+    if set([trans['from'], trans['to']]) != set([from_const, to_const]):
+        raise ValueError('%s trans is %s->%s' % (err_str,
+                                                 _frame_to_str[trans['from']],
+                                                 _frame_to_str[trans['to']]))
+    if trans['from'] != from_const:
+        trans = invert_transform(trans)
+    return trans
+
+
+def _get_mri_head_t(trans):
+    """Get mri_head_t (from=mri, to=head) from mri filename"""
+    if isinstance(trans, string_types):
+        if not op.isfile(trans):
+            raise IOError('trans file "%s" not found' % trans)
+        if op.splitext(trans)[1] in ['.fif', '.gz']:
+            mri_head_t = read_trans(trans)
+        else:
+            # convert "-trans.txt" to "-trans.fif" mri-type equivalent
+            t = np.genfromtxt(trans)
+            if t.ndim != 2 or t.shape != (4, 4):
+                raise RuntimeError('File "%s" did not have 4x4 entries'
+                                   % trans)
+            mri_head_t = Transform('head', 'mri', t)
+    elif isinstance(trans, dict):
+        mri_head_t = trans
+        trans = 'dict'
+    elif trans is None:
+        mri_head_t = Transform('head', 'mri', np.eye(4))
+        trans = 'identity'
+    else:
+        raise ValueError('trans type %s not known, must be str, dict, or None'
+                         % type(trans))
+    # it's usually a head->MRI transform, so we probably need to invert it
+    mri_head_t = _ensure_trans(mri_head_t, 'mri', 'head')
+    return mri_head_t, trans
 
 
 def combine_transforms(t_first, t_second, fro, to):
-    """Combine two transforms"""
+    """Combine two transforms
+
+    Parameters
+    ----------
+    t_first : dict
+        First transform.
+    t_second : dict
+        Second transform.
+    fro : int
+        From coordinate frame.
+    to : int
+        To coordinate frame.
+
+    Returns
+    -------
+    trans : dict
+        Combined transformation.
+    """
+    fro = _to_const(fro)
+    to = _to_const(to)
     if t_first['from'] != fro:
         raise RuntimeError('From mismatch: %s ("%s") != %s ("%s")'
                            % (t_first['from'],
@@ -254,8 +383,7 @@ def combine_transforms(t_first, t_second, fro, to):
                            % (t_second['to'],
                               _coord_frame_name(t_second['to']),
                               to, _coord_frame_name(to)))
-    return {'from': fro, 'to': to, 'trans': np.dot(t_second['trans'],
-                                                   t_first['trans'])}
+    return Transform(fro, to, np.dot(t_second['trans'], t_first['trans']))
 
 
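A sketch of combine_transforms() with the new string coordinate frames, using
identity matrices so the example stays self-contained:

    import numpy as np

    t1 = Transform('head', 'meg', np.eye(4))
    t2 = Transform('meg', 'mri', np.eye(4))
    t = combine_transforms(t1, t2, fro='head', to='mri')
    assert t['from'] == _str_to_frame['head']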
 def read_trans(fname):
@@ -271,10 +399,10 @@ def read_trans(fname):
     trans : dict
         The transformation dictionary from the fif file.
 
-    Notes
-    -----
-    The trans dictionary has the following structure:
-    trans = {'from': int, 'to': int, 'trans': numpy.ndarray <4x4>}
+    See Also
+    --------
+    write_trans
+    Transform
     """
     fid, tree, directory = fiff_open(fname)
 
@@ -299,9 +427,12 @@ def write_trans(fname, trans):
         The name of the file, which should end in '-trans.fif'.
     trans : dict
         Trans file data, as returned by read_trans.
+
+    See Also
+    --------
+    read_trans
     """
     check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz'))
-
     fid = start_file(fname)
     write_coord_trans(fid, trans)
     end_file(fid)
@@ -309,15 +440,18 @@ def write_trans(fname, trans):
 
 def invert_transform(trans):
     """Invert a transformation between coordinate systems
-    """
-    itrans = {'to': trans['from'], 'from': trans['to'],
-              'trans': linalg.inv(trans['trans'])}
-    return itrans
 
+    Parameters
+    ----------
+    trans : dict
+        Transform to invert.
 
-_frame_dict = dict(meg=FIFF.FIFFV_COORD_DEVICE,
-                   mri=FIFF.FIFFV_COORD_MRI,
-                   head=FIFF.FIFFV_COORD_HEAD)
+    Returns
+    -------
+    inv_trans : dict
+        Inverse transform.
+    """
+    return Transform(trans['to'], trans['from'], linalg.inv(trans['trans']))
 
 
 def transform_surface_to(surf, dest, trans):
@@ -325,9 +459,9 @@ def transform_surface_to(surf, dest, trans):
 
     Parameters
     ----------
-    src : dict
+    surf : dict
         Surface.
-    orig: 'meg' | 'mri' | 'head' | int
+    dest : 'meg' | 'mri' | 'head' | int
         Destination coordinate system. Can be an integer for using
         FIFF types.
     trans : dict
@@ -339,25 +473,21 @@ def transform_surface_to(surf, dest, trans):
         Transformed source space. Data are modified in-place.
     """
     if isinstance(dest, string_types):
-        if dest not in _frame_dict:
+        if dest not in _str_to_frame:
             raise KeyError('dest must be one of %s, not "%s"'
-                           % [list(_frame_dict.keys()), dest])
-        dest = _frame_dict[dest]  # convert to integer
+                           % (list(_str_to_frame.keys()), dest))
+        dest = _str_to_frame[dest]  # convert to integer
     if surf['coord_frame'] == dest:
         return surf
 
-    if trans['to'] == surf['coord_frame'] and trans['from'] == dest:
-        trans = invert_transform(trans)
-    elif trans['from'] != surf['coord_frame'] or trans['to'] != dest:
-        raise ValueError('Cannot transform the source space using this '
-                         'coordinate transformation')
-
+    trans = _ensure_trans(trans, int(surf['coord_frame']), dest)
     surf['coord_frame'] = dest
-    surf['rr'] = apply_trans(trans['trans'], surf['rr'])
-    surf['nn'] = apply_trans(trans['trans'], surf['nn'], move=False)
+    surf['rr'] = apply_trans(trans, surf['rr'])
+    surf['nn'] = apply_trans(trans, surf['nn'], move=False)
     return surf
 
 
+@deprecated('transform_coordinates is deprecated and will be removed in v0.11')
 def transform_coordinates(filename, pos, orig, dest):
     """Transform coordinates between various MRI-related coordinate frames
 
@@ -366,6 +496,7 @@ def transform_coordinates(filename, pos, orig, dest):
     filename: string
         Name of a fif file containing the coordinate transformations
         This file can be conveniently created with mne_collect_transforms
+        or ``collect_transforms``.
     pos: array of shape N x 3
         array of locations to transform (in meters)
     orig: 'meg' | 'mri'
@@ -382,8 +513,8 @@ def transform_coordinates(filename, pos, orig, dest):
     trans_pos: array of shape N x 3
         The transformed locations
 
-    Example
-    -------
+    Examples
+    --------
     transform_coordinates('all-trans.fif', np.eye(3), 'meg', 'fs_tal')
     transform_coordinates('all-trans.fif', np.eye(3), 'mri', 'mni_tal')
     """
@@ -421,9 +552,9 @@ def transform_coordinates(filename, pos, orig, dest):
     #
     #   Check we have everything we need
     #
-    if ((orig == FIFF.FIFFV_COORD_HEAD and T0 is None) or (T1 is None)
-            or (T2 is None) or (dest == FIFF.FIFFV_MNE_COORD_FS_TAL and
-                                ((T3minus is None) or (T3minus is None)))):
+    if ((orig == FIFF.FIFFV_COORD_HEAD and T0 is None) or (T1 is None) or
+            (T2 is None) or (dest == FIFF.FIFFV_MNE_COORD_FS_TAL and
+                             ((T3minus is None) or (T3plus is None)))):
         raise ValueError('All required coordinate transforms not found')
 
     #
@@ -463,77 +594,96 @@ def transform_coordinates(filename, pos, orig, dest):
     return trans_pos
 
 
-# @verbose
-# def transform_meg_chs(chs, trans, verbose=None):
-#     """
-#     %
-#     % [res, count] = fiff_transform_meg_chs(chs,trans)
-#     %
-#     % Move to another coordinate system in MEG channel channel info
-#     % Count gives the number of channels transformed
-#     %
-#     % NOTE: Only the coil_trans field is modified by this routine, not
-#     % loc which remains to reflect the original data read from the fif file
-#     %
-#     %
-#
-#     XXX
-#     """
-#
-#     res = copy.deepcopy(chs)
-#
-#     count = 0
-#     t = trans['trans']
-#     for ch in res:
-#         if (ch['kind'] == FIFF.FIFFV_MEG_CH
-#                                     or ch['kind'] == FIFF.FIFFV_REF_MEG_CH):
-#             if (ch['coord_frame'] == trans['from']
-#                                             and ch['coil_trans'] is not None):
-#                 ch['coil_trans'] = np.dot(t, ch['coil_trans'])
-#                 ch['coord_frame'] = trans['to']
-#                 count += 1
-#
-#     if count > 0:
-#         logger.info('    %d MEG channel locations transformed' % count)
-#
-#     return res, count
-
-# @verbose
-# def transform_eeg_chs(chs, trans, verbose=None):
-#     """
-#     %
-#     % [res, count] = fiff_transform_eeg_chs(chs,trans)
-#     %
-#     % Move to another coordinate system in EEG channel channel info
-#     % Count gives the number of channels transformed
-#     %
-#     % NOTE: Only the eeg_loc field is modified by this routine, not
-#     % loc which remains to reflect the original data read from the fif file
-#     %
-#
-#     XXX
-#     """
-#     res = copy.deepcopy(chs)
-#
-#     count = 0
-#     #
-#     #   Output unaugmented vectors from the transformation
-#     #
-#     t = trans['trans'][:3,:]
-#     for ch in res:
-#         if ch['kind'] == FIFF.FIFFV_EEG_CH:
-#             if (ch['coord_frame'] == trans['from']
-#                                             and ch['eeg_loc'] is not None):
-#                 #
-#                 # Transform the augmented EEG location vectors
-#                 #
-#                 for p in range(ch['eeg_loc'].shape[1]):
-#                     ch['eeg_loc'][:, p] = np.dot(t,
-#                                                 np.r_[ch['eeg_loc'][:,p], 1])
-#                 count += 1
-#                 ch['coord_frame'] = trans['to']
-#
-#     if count > 0:
-#         logger.info('    %d EEG electrode locations transformed\n' % count)
-#
-#     return res, count
+def get_ras_to_neuromag_trans(nasion, lpa, rpa):
+    """Construct a transformation matrix to the MNE head coordinate system
+
+    Construct a transformation matrix from an arbitrary RAS coordinate system
+    to the MNE head coordinate system, in which the x axis passes through the
+    two preauricular points, and the y axis passes through the nasion and is
+    normal to the x axis. (see mne manual, pg. 97)
+
+    Parameters
+    ----------
+    nasion : array_like, shape (3,)
+        Nasion point coordinate.
+    lpa : array_like, shape (3,)
+        Left preauricular point coordinate.
+    rpa : array_like, shape (3,)
+        Right preauricular point coordinate.
+
+    Returns
+    -------
+    trans : numpy.array, shape = (4, 4)
+        Transformation matrix to MNE head space.
+    """
+    # check input args
+    nasion = np.asarray(nasion)
+    lpa = np.asarray(lpa)
+    rpa = np.asarray(rpa)
+    for pt in (nasion, lpa, rpa):
+        if pt.ndim != 1 or len(pt) != 3:
+            raise ValueError("Points have to be provided as one dimensional "
+                             "arrays of length 3.")
+
+    right = rpa - lpa
+    right_unit = right / linalg.norm(right)
+
+    origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
+
+    anterior = nasion - origin
+    anterior_unit = anterior / linalg.norm(anterior)
+
+    superior_unit = np.cross(right_unit, anterior_unit)
+
+    x, y, z = -origin
+    origin_trans = translation(x, y, z)
+
+    trans_l = np.vstack((right_unit, anterior_unit, superior_unit, [0, 0, 0]))
+    trans_r = np.reshape([0, 0, 0, 1], (4, 1))
+    rot_trans = np.hstack((trans_l, trans_r))
+
+    trans = np.dot(rot_trans, origin_trans)
+    return trans
+
+
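A sketch of get_ras_to_neuromag_trans() on made-up fiducials; with LPA and
RPA symmetric about the origin, the nasion must land on the +y axis of the
resulting head frame:

    import numpy as np

    nasion, lpa, rpa = [0., .1, 0.], [-.08, 0., 0.], [.08, 0., 0.]  # meters
    trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
    print(apply_trans(trans, np.array(nasion)))  # -> [0.   0.1  0. ]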
+@deprecated('collect_transforms is deprecated and will be removed in v0.11')
+def collect_transforms(fname, xforms):
+    """Collect a set of transforms in a single FIFF file
+
+    Parameters
+    ----------
+    fname : str
+        Filename to save to.
+    xforms : list of dict
+        List of transformations.
+    """
+    check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz'))
+    with start_file(fname) as fid:
+        for xform in xforms:
+            write_coord_trans(fid, xform)
+        end_file(fid)
+
+
+def _sphere_to_cartesian(theta, phi, r):
+    """Transform spherical coordinates to cartesian"""
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    return x, y, z
+
+
+def _polar_to_cartesian(theta, r):
+    """Transform polar coordinates to cartesian"""
+    x = r * np.cos(theta)
+    y = r * np.sin(theta)
+    return x, y
+
+
+def _cartesian_to_sphere(x, y, z):
+    """Transform cartesian coordinates to spherical"""
+    hypotxy = np.hypot(x, y)
+    r = np.hypot(hypotxy, z)
+    elev = np.arctan2(z, hypotxy)
+    az = np.arctan2(y, x)
+    return az, elev, r
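The private coordinate helpers invert each other; a quick round-trip check
with arbitrary angles (radians):

    import numpy as np

    x, y, z = _sphere_to_cartesian(theta=0.3, phi=0.2, r=1.)
    az, elev, r = _cartesian_to_sphere(x, y, z)
    assert np.allclose([az, elev, r], [0.3, 0.2, 1.])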
diff --git a/mne/utils.py b/mne/utils.py
index 05b9828..43bcb3a 100644
--- a/mne/utils.py
+++ b/mne/utils.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 """Some utility functions"""
 from __future__ import print_function
 
@@ -7,6 +8,7 @@ from __future__ import print_function
 
 import warnings
 import logging
+import time
 from distutils.version import LooseVersion
 import os
 import os.path as op
@@ -15,32 +17,62 @@ import inspect
 from string import Formatter
 import subprocess
 import sys
-from sys import stdout
 import tempfile
 import shutil
 from shutil import rmtree
-import atexit
 from math import log, ceil
 import json
 import ftplib
 import hashlib
+from functools import partial
+import atexit
 
 import numpy as np
-import scipy
-from scipy import linalg
-
+from scipy import linalg, sparse
 
 from .externals.six.moves import urllib
 from .externals.six import string_types, StringIO, BytesIO
 from .externals.decorator import decorator
 
+from .fixes import isclose
+
 logger = logging.getLogger('mne')  # one selection here used across mne-python
 logger.propagate = False  # don't propagate (in case of multiple imports)
 
 
+def _memory_usage(*args, **kwargs):
+    if isinstance(args[0], tuple):
+        args[0][0](*args[0][1], **args[0][2])
+    elif not isinstance(args[0], int):  # can be -1 for current use
+        args[0]()
+    return [-1]
+
+try:
+    from memory_profiler import memory_usage
+except ImportError:
+    memory_usage = _memory_usage
+
+
+def nottest(f):
+    """Decorator to mark a function as not a test"""
+    f.__test__ = False
+    return f
+
+
 ###############################################################################
 # RANDOM UTILITIES
 
+def _get_call_line(in_verbose=False):
+    """Helper to get the call line from within a function"""
+    # XXX Eventually we could auto-triage whether in a `verbose` decorated
+    # function or not.
+    # NB This probably only works for functions that are undecorated,
+    # or decorated by `verbose`.
+    back = 2 if not in_verbose else 4
+    call_frame = inspect.getouterframes(inspect.currentframe())[back][0]
+    return inspect.getframeinfo(call_frame).code_context[0].strip()
+
+
 def _sort_keys(x):
     """Sort and return keys of dict"""
     keys = list(x.keys())  # note: not thread-safe
@@ -142,6 +174,17 @@ def object_diff(a, b, pre=''):
     elif isinstance(a, (StringIO, BytesIO)):
         if a.getvalue() != b.getvalue():
             out += pre + ' StringIO mismatch\n'
+    elif sparse.isspmatrix(a):
+        # sparsity and sparse type of b vs a already checked above by type()
+        if b.shape != a.shape:
+            out += pre + (' sparse matrix a and b shape mismatch '
+                          '(%s vs %s)' % (a.shape, b.shape))
+        else:
+            c = a - b
+            c.eliminate_zeros()
+            if c.nnz > 0:
+                out += pre + (' sparse matrix a and b differ on %s '
+                              'elements' % c.nnz)
     else:
         raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
     return out
@@ -218,9 +261,9 @@ def check_fname(fname, filetype, endings):
     """
     print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
     if not fname.endswith(endings):
-        warnings.warn('This filename does not conform to mne naming convention'
-                      's. All %s files should end with '
-                      '%s' % (filetype, print_endings))
+        warnings.warn('This filename (%s) does not conform to MNE naming '
+                      'conventions. All %s files should end with '
+                      '%s' % (fname, filetype, print_endings))
 
 
 class WrapStdOut(object):
@@ -234,11 +277,13 @@ class WrapStdOut(object):
 class _TempDir(str):
     """Class for creating and auto-destroying temp dir
 
-    This is designed to be used with testing modules.
+    This is designed to be used with testing modules. Instances should be
+    defined inside test functions. Instances defined at module level cannot
+    guarantee proper destruction of the temporary directory.
 
-    We cannot simply use __del__() method for cleanup here because the rmtree
-    function may be cleaned up before this object, so we use the atexit module
-    instead.
+    When used at module level, the __del__() method used for cleanup can
+    fail because the rmtree function may be garbage-collected before this
+    object is deleted (using the atexit module would be an alternative).
     """
     def __new__(self):
         new = str.__new__(self, tempfile.mkdtemp())
@@ -246,14 +291,13 @@ class _TempDir(str):
 
     def __init__(self):
         self._path = self.__str__()
-        atexit.register(self.cleanup)
 
-    def cleanup(self):
+    def __del__(self):
         rmtree(self._path, ignore_errors=True)
 
 
 def estimate_rank(data, tol=1e-4, return_singular=False,
-                  copy=True):
+                  norm=True, copy=True):
     """Helper to estimate the rank of data
 
     This function will normalize the rows of the data (typically
@@ -272,6 +316,9 @@ def estimate_rank(data, tol=1e-4, return_singular=False,
     return_singular : bool
         If True, also return the singular values that were used
         to determine the rank.
+    norm : bool
+        If True, data will be scaled by their estimated row-wise norm.
+        Else data are assumed to be scaled. Defaults to True.
     copy : bool
         If False, values in data will be modified in-place during
         rank estimation (saves memory).
@@ -286,9 +333,9 @@ def estimate_rank(data, tol=1e-4, return_singular=False,
     """
     if copy is True:
         data = data.copy()
-    norms = np.sqrt(np.sum(data ** 2, axis=1))
-    norms[norms == 0] = 1.0
-    data /= norms[:, np.newaxis]
+    if norm is True:
+        norms = _compute_row_norms(data)
+        data /= norms[:, np.newaxis]
     s = linalg.svd(data, compute_uv=False, overwrite_a=True)
     rank = np.sum(s >= tol)
     if return_singular is True:
@@ -297,6 +344,13 @@ def estimate_rank(data, tol=1e-4, return_singular=False,
         return rank
 
 
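A sketch of estimate_rank() on a deliberately rank-deficient matrix (random
data with one linearly dependent row):

    import numpy as np

    rng = np.random.RandomState(0)
    a = rng.randn(5, 10)
    a[-1] = a[0] + a[1]  # force linear dependence
    assert estimate_rank(a) == 4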
+def _compute_row_norms(data):
+    """Compute scaling based on estimated norm"""
+    norms = np.sqrt(np.sum(data ** 2, axis=1))
+    norms[norms == 0] = 1.0
+    return norms
+
+
 def _reject_data_segments(data, reject, flat, decim, info, tstep):
     """Reject data segments using peak-to-peak amplitude
     """
@@ -332,64 +386,6 @@ def _reject_data_segments(data, reject, flat, decim, info, tstep):
     return data, drop_inds
 
 
-def run_subprocess(command, *args, **kwargs):
-    """Run command using subprocess.Popen
-
-    Run command and wait for command to complete. If the return code was zero
-    then return, otherwise raise CalledProcessError.
-    By default, this will also add stdout= and stderr=subproces.PIPE
-    to the call to Popen to suppress printing to the terminal.
-
-    Parameters
-    ----------
-    command : list of str
-        Command to run as subprocess (see subprocess.Popen documentation).
-    *args, **kwargs : arguments
-        Arguments to pass to subprocess.Popen.
-
-    Returns
-    -------
-    stdout : str
-        Stdout returned by the process.
-    stderr : str
-        Stderr returned by the process.
-    """
-    if 'stderr' not in kwargs:
-        kwargs['stderr'] = subprocess.PIPE
-    if 'stdout' not in kwargs:
-        kwargs['stdout'] = subprocess.PIPE
-
-    # Check the PATH environment variable. If run_subprocess() is to be called
-    # frequently this should be refactored so as to only check the path once.
-    env = kwargs.get('env', os.environ)
-    if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
-        msg = ("Your PATH environment variable contains at least one path "
-               "starting with a tilde ('~') character. Such paths are not "
-               "interpreted correctly from within Python. It is recommended "
-               "that you use '$HOME' instead of '~'.")
-        warnings.warn(msg)
-
-    logger.info("Running subprocess: %s" % str(command))
-    p = subprocess.Popen(command, *args, **kwargs)
-    stdout_, stderr = p.communicate()
-
-    if stdout_.strip():
-        logger.info("stdout:\n%s" % stdout_)
-    if stderr.strip():
-        logger.info("stderr:\n%s" % stderr)
-
-    output = (stdout_, stderr)
-    if p.returncode:
-        print(output)
-        err_fun = subprocess.CalledProcessError.__init__
-        if 'output' in inspect.getargspec(err_fun).args:
-            raise subprocess.CalledProcessError(p.returncode, command, output)
-        else:
-            raise subprocess.CalledProcessError(p.returncode, command)
-
-    return output
-
-
 class _FormatDict(dict):
     """Helper for pformat()"""
     def __missing__(self, key):
@@ -432,14 +428,20 @@ class deprecated(object):
 
     The optional extra argument will be appended to the deprecation message
     and the docstring. Note: to use this with the default value for extra, put
-    in an empty of parentheses:
+    in an empty set of parentheses::
 
-    >>> from mne.utils import deprecated
-    >>> deprecated() # doctest: +ELLIPSIS
-    <mne.utils.deprecated object at ...>
+        >>> from mne.utils import deprecated
+        >>> deprecated() # doctest: +ELLIPSIS
+        <mne.utils.deprecated object at ...>
 
-    >>> @deprecated()
-    ... def some_function(): pass
+        >>> @deprecated()
+        ... def some_function(): pass
+
+
+    Parameters
+    ----------
+    extra : string
+        To be added to the deprecation messages.
     """
     # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
     # but with many changes.
@@ -448,16 +450,16 @@ class deprecated(object):
     # sklearn or scikits.learn, so a self-contained example is used above
 
     def __init__(self, extra=''):
-        """
-        Parameters
-        ----------
-        extra: string
-          to be added to the deprecation messages
-
-        """
         self.extra = extra
 
     def __call__(self, obj):
+        """Call
+
+        Parameters
+        ----------
+        obj : object
+            Object to call.
+        """
         if isinstance(obj, type):
             return self._decorate_class(obj)
         else:
@@ -471,14 +473,14 @@ class deprecated(object):
         # FIXME: we should probably reset __new__ for full generality
         init = cls.__init__
 
-        def wrapped(*args, **kwargs):
+        def deprecation_wrapped(*args, **kwargs):
             warnings.warn(msg, category=DeprecationWarning)
             return init(*args, **kwargs)
-        cls.__init__ = wrapped
+        cls.__init__ = deprecation_wrapped
 
-        wrapped.__name__ = '__init__'
-        wrapped.__doc__ = self._update_doc(init.__doc__)
-        wrapped.deprecated_original = init
+        deprecation_wrapped.__name__ = '__init__'
+        deprecation_wrapped.__doc__ = self._update_doc(init.__doc__)
+        deprecation_wrapped.deprecated_original = init
 
         return cls
 
@@ -489,15 +491,15 @@ class deprecated(object):
         if self.extra:
             msg += "; %s" % self.extra
 
-        def wrapped(*args, **kwargs):
+        def deprecation_wrapped(*args, **kwargs):
             warnings.warn(msg, category=DeprecationWarning)
             return fun(*args, **kwargs)
 
-        wrapped.__name__ = fun.__name__
-        wrapped.__dict__ = fun.__dict__
-        wrapped.__doc__ = self._update_doc(fun.__doc__)
+        deprecation_wrapped.__name__ = fun.__name__
+        deprecation_wrapped.__dict__ = fun.__dict__
+        deprecation_wrapped.__doc__ = self._update_doc(fun.__doc__)
 
-        return wrapped
+        return deprecation_wrapped
 
     def _update_doc(self, olddoc):
         newdoc = "DEPRECATED"
@@ -512,368 +514,314 @@ class deprecated(object):
 def verbose(function, *args, **kwargs):
     """Improved verbose decorator to allow functions to override log-level
 
-    Do not call this directly to set global verbosrity level, instead use
+    Do not call this directly to set global verbosity level, instead use
     set_log_level().
 
     Parameters
     ----------
-    function - function
+    function : function
         Function to be decorated by setting the verbosity level.
 
     Returns
     -------
-    dec - function
+    dec : function
         The decorated function
     """
     arg_names = inspect.getargspec(function).args
-
+    default_level = verbose_level = None
     if len(arg_names) > 0 and arg_names[0] == 'self':
         default_level = getattr(args[0], 'verbose', None)
-    else:
-        default_level = None
-
-    if('verbose' in arg_names):
+    if 'verbose' in arg_names:
         verbose_level = args[arg_names.index('verbose')]
-    else:
-        verbose_level = default_level
+    elif 'verbose' in kwargs:
+        verbose_level = kwargs.pop('verbose')
+
+    # This ensures that object.method(verbose=None) will use object.verbose
+    verbose_level = default_level if verbose_level is None else verbose_level
 
     if verbose_level is not None:
         old_level = set_log_level(verbose_level, True)
         # set it back if we get an exception
         try:
-            ret = function(*args, **kwargs)
-        except:
+            return function(*args, **kwargs)
+        finally:
             set_log_level(old_level)
-            raise
-        set_log_level(old_level)
-        return ret
-    else:
-        ret = function(*args, **kwargs)
-        return ret
+    return function(*args, **kwargs)
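
A minimal sketch of how decorated functions behave (assuming, as in the
installed package, that ``verbose`` is wrapped by the project's decorator
helper so it can be applied with ``@verbose``):

    from mne.utils import verbose, logger

    @verbose
    def count_spikes(data, verbose=None):
        """Toy function; the decorator picks up the ``verbose`` argument."""
        logger.info('scanning %d samples' % len(data))
        return sum(d > 0 for d in data)

    count_spikes([0, 1, 0, 1], verbose='WARNING')  # INFO message suppressed
    count_spikes([0, 1, 0, 1], verbose=True)       # INFO message printed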
 
 
-def has_command_line_tools():
-    if 'MNE_ROOT' not in os.environ:
-        return False
-    else:
-        return True
+@nottest
+def slow_test(f):
+    """Decorator for slow tests"""
+    f.slow_test = True
+    return f
 
 
-requires_mne = np.testing.dec.skipif(not has_command_line_tools(),
-                                     'Requires MNE command line tools')
+@nottest
+def ultra_slow_test(f):
+    """Decorator for ultra slow tests"""
+    f.ultra_slow_test = True
+    f.slow_test = True
+    return f
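
An illustrative (hypothetical) use of these markers; the attributes they set
are what nose's attrib plugin selects on, e.g. ``nosetests -a '!slow_test'``
to skip them:

    from mne.utils import slow_test

    @slow_test
    def test_full_pipeline():
        """Only run when slow tests are requested."""
        pass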
 
 
 def has_nibabel(vox2ras_tkr=False):
+    """Determine if nibabel is installed
+
+    Parameters
+    ----------
+    vox2ras_tkr : bool
+        If True, require nibabel has vox2ras_tkr support.
+
+    Returns
+    -------
+    has : bool
+        True if the user has nibabel.
+    """
     try:
         import nibabel
+        out = True
         if vox2ras_tkr:  # we need MGHHeader to have vox2ras_tkr param
-            mgh_ihdr = getattr(nibabel, 'MGHImage', None)
-            mgh_ihdr = getattr(mgh_ihdr, 'header_class', None)
-            get_vox2ras_tkr = getattr(mgh_ihdr, 'get_vox2ras_tkr', None)
-            if get_vox2ras_tkr is not None:
-                return True
-            else:
-                return False
-        else:
-            return True
+            out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),
+                                   'header_class', 0),
+                           'get_vox2ras_tkr', None) is not None)
+        return out
     except ImportError:
         return False
 
 
-def has_freesurfer():
+def has_mne_c():
     """Aux function"""
-    if not 'FREESURFER_HOME' in os.environ:
-        return False
-    else:
-        return True
-
+    return 'MNE_ROOT' in os.environ
 
-requires_fs_or_nibabel = np.testing.dec.skipif(not has_nibabel() and
-                                               not has_freesurfer(),
-                                               'Requires nibabel or '
-                                               'Freesurfer')
 
-
-def has_neuromag2ft():
+def has_freesurfer():
     """Aux function"""
-    if not 'NEUROMAG2FT_ROOT' in os.environ:
-        return False
-    else:
-        return True
-
-
-requires_neuromag2ft = np.testing.dec.skipif(not has_neuromag2ft(),
-                                             'Requires neuromag2ft')
+    return 'FREESURFER_HOME' in os.environ
 
 
 def requires_nibabel(vox2ras_tkr=False):
     """Aux function"""
-    if vox2ras_tkr:
-        extra = ' with vox2ras_tkr support'
-    else:
-        extra = ''
+    extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''
     return np.testing.dec.skipif(not has_nibabel(vox2ras_tkr),
                                  'Requires nibabel%s' % extra)
 
-requires_freesurfer = np.testing.dec.skipif(not has_freesurfer(),
-                                            'Requires Freesurfer')
-
-
-def requires_mem_gb(requirement):
-    """Decorator to skip test if insufficient memory is available"""
-    def real_decorator(function):
-        # convert to gb
-        req = int(1e9 * requirement)
-        try:
-            import psutil
-            has_psutil = True
-        except ImportError:
-            has_psutil = False
-
-        @wraps(function)
-        def dec(*args, **kwargs):
-            if has_psutil and psutil.virtual_memory().available >= req:
-                skip = False
-            else:
-                skip = True
-
-            if skip is True:
-                from nose.plugins.skip import SkipTest
-                raise SkipTest('Test %s skipped, requires >= %0.1f GB free '
-                               'memory' % (function.__name__, requirement))
-            ret = function(*args, **kwargs)
-            return ret
-        return dec
-    return real_decorator
-
-
-def requires_pandas(function):
-    """Decorator to skip test if pandas is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        skip = False
-        try:
-            import pandas
-            version = LooseVersion(pandas.__version__)
-            if version < '0.8.0':
-                skip = True
-        except ImportError:
-            skip = True
-
-        if skip is True:
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires pandas'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-
-        return ret
-
-    return dec
-
-
-def requires_tvtk(function):
-    """Decorator to skip test if TVTK is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        skip = False
-        try:
-            from tvtk.api import tvtk  # analysis:ignore
-        except ImportError:
-            skip = True
-
-        if skip is True:
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires TVTK'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-
-        return ret
-
-    return dec
-
-
-def requires_statsmodels(function):
-    """Decorator to skip test if statsmodels is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        skip = False
-        try:
-            import statsmodels  # noqa, analysis:ignore
-        except ImportError:
-            skip = True
-
-        if skip is True:
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires statsmodels'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-
-        return ret
-
-    return dec
 
+def requires_version(library, min_version):
+    """Helper for testing"""
+    return np.testing.dec.skipif(not check_version(library, min_version),
+                                 'Requires %s version >= %s'
+                                 % (library, min_version))
 
-def requires_patsy(function):
-    """
-    Decorator to skip test if patsy is not available. Patsy should be a
-    statsmodels dependency but apparently it's possible to install statsmodels
-    without it.
-    """
-    @wraps(function)
-    def dec(*args, **kwargs):
-        skip = False
-        try:
-            import patsy  # noqa, analysis:ignore
-        except ImportError:
-            skip = True
-
-        if skip is True:
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires patsy'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-
-        return ret
-
-    return dec
 
+def requires_module(function, name, call):
+    """Decorator to skip test if package is not available"""
+    try:
+        from nose.plugins.skip import SkipTest
+    except ImportError:
+        SkipTest = AssertionError
 
-def requires_sklearn(function):
-    """Decorator to skip test if sklearn is not available"""
     @wraps(function)
     def dec(*args, **kwargs):
-        required_version = '0.14'
         skip = False
         try:
-            import sklearn
-            version = LooseVersion(sklearn.__version__)
-            if version < required_version:
-                skip = True
-        except ImportError:
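+            # NB: written this way (presumably) so it works on both Python 2,
+            # where it is an ``exec ... in`` statement, and Python 3, where it
+            # parses as a tuple whose first element evaluates exec(call)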
+            exec(call) in globals(), locals()
+        except Exception:
             skip = True
-
         if skip is True:
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires sklearn (version >= %s)'
-                           % (function.__name__, required_version))
-        ret = function(*args, **kwargs)
-
-        return ret
-
-    return dec
-
-
-def requires_good_network(function):
-    """Helper for testing"""
-
-    @wraps(function)
-    def dec(*args, **kwargs):
-        if int(os.environ.get('MNE_SKIP_NETWORK_TESTS', 0)):
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires a good network '
-                           'connection' % function.__name__)
-        ret = function(*args, **kwargs)
-
-        return ret
-
+            raise SkipTest('Test %s skipped, requires %s'
+                           % (function.__name__, name))
+        return function(*args, **kwargs)
     return dec
 
 
-def make_skipper_dec(module, skip_str):
-    """Helper to make skipping decorators"""
-    skip = False
-    try:
-        __import__(module)
-    except ImportError:
-        skip = True
-    return np.testing.dec.skipif(skip, skip_str)
-
-
-requires_nitime = make_skipper_dec('nitime', 'nitime not installed')
-requires_traits = make_skipper_dec('traits', 'traits not installed')
-
-
-def _mne_fs_not_in_env():
-    """Aux function"""
-    return (('FREESURFER_HOME' not in os.environ) or
-            ('MNE_ROOT' not in os.environ))
-
-requires_mne_fs_in_env = np.testing.dec.skipif(_mne_fs_not_in_env)
-
-
-def _check_mayavi_version(min_version='4.3.0'):
-    """Raise a RuntimeError if the required version of mayavi is not available
+_pandas_call = """
+import pandas
+version = LooseVersion(pandas.__version__)
+if version < '0.8.0':
+    raise ImportError
+"""
+
+_sklearn_call = """
+required_version = '0.14'
+import sklearn
+version = LooseVersion(sklearn.__version__)
+if version < required_version:
+    raise ImportError
+"""
+
+_sklearn_0_15_call = """
+required_version = '0.15'
+import sklearn
+version = LooseVersion(sklearn.__version__)
+if version < required_version:
+    raise ImportError
+"""
+
+_mayavi_call = """
+from mayavi import mlab
+mlab.options.backend = 'test'
+"""
+
+_mne_call = """
+if not has_mne_c():
+    raise ImportError
+"""
+
+_fs_call = """
+if not has_freesurfer():
+    raise ImportError
+"""
+
+_n2ft_call = """
+if 'NEUROMAG2FT_ROOT' not in os.environ:
+    raise ImportError
+"""
+
+_fs_or_ni_call = """
+if not has_nibabel() and not has_freesurfer():
+    raise ImportError
+"""
+
+requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
+requires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)
+requires_sklearn_0_15 = partial(requires_module, name='sklearn',
+                                call=_sklearn_0_15_call)
+requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
+requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
+requires_freesurfer = partial(requires_module, name='Freesurfer',
+                              call=_fs_call)
+requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
+                               call=_n2ft_call)
+requires_fs_or_nibabel = partial(requires_module, name='nibabel or Freesurfer',
+                                 call=_fs_or_ni_call)
+
+requires_tvtk = partial(requires_module, name='TVTK',
+                        call='from tvtk.api import tvtk')
+requires_statsmodels = partial(requires_module, name='statsmodels',
+                               call='import statsmodels')
+requires_patsy = partial(requires_module, name='patsy',
+                         call='import patsy')
+requires_pysurfer = partial(requires_module, name='PySurfer',
+                            call='from surfer import Brain')
+requires_PIL = partial(requires_module, name='PIL',
+                       call='from PIL import Image')
+requires_good_network = partial(
+    requires_module, name='good network connection',
+    call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
+         '    raise ImportError')
+requires_nitime = partial(requires_module, name='nitime',
+                          call='import nitime')
+requires_traits = partial(requires_module, name='traits',
+                          call='import traits')
+requires_h5py = partial(requires_module, name='h5py', call='import h5py')
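
An illustrative (hypothetical) test module using these decorators:

    from mne.utils import requires_pandas, requires_version

    requires_scipy_0_12 = requires_version('scipy', '0.12')

    @requires_pandas
    @requires_scipy_0_12
    def test_data_frames():
        """Runs only when pandas >= 0.8.0 and scipy >= 0.12 import cleanly."""
        import pandas
        assert hasattr(pandas, 'DataFrame')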
+
+
+def check_version(library, min_version):
+    """Check minimum library version required
 
     Parameters
     ----------
+    library : str
+        The library name to import. Must have a ``__version__`` property.
     min_version : str
-        The version string. Anything that matches
+        The minimum version string. Anything that matches
         ``'(\\d+ | [a-z]+ | \\.)'``
-    """
-    import mayavi
-    require_mayavi = LooseVersion(min_version)
-    has_mayavi = LooseVersion(mayavi.__version__)
-    if has_mayavi < require_mayavi:
-        raise RuntimeError("Need mayavi >= %s" % require_mayavi)
-
-
-def check_sklearn_version(min_version):
-    """Check minimum sklearn version required
 
-    Parameters
-    ----------
-    min_version : str
-        The version string. Anything that matches
-        ``'(\\d+ | [a-z]+ | \\.)'``
+    Returns
+    -------
+    ok : bool
+        True if the library exists with at least the specified version.
     """
     ok = True
     try:
-        import sklearn
-        this_version = LooseVersion(sklearn.__version__)
-        if this_version < min_version:
-            ok = False
+        library = __import__(library)
     except ImportError:
         ok = False
+    else:
+        this_version = LooseVersion(library.__version__)
+        if this_version < min_version:
+            ok = False
     return ok
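
Quick sanity checks (doctest style; assumes numpy is installed):

    >>> check_version('numpy', '1.0')
    True
    >>> check_version('numpy', '1000.0')
    False
    >>> check_version('no_such_module', '0.1')  # import failure -> False
    False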
 
 
-def check_scipy_version(min_version):
-    """Check minimum sklearn version required
+def _check_mayavi_version(min_version='4.3.0'):
+    """Helper for mayavi"""
+    if not check_version('mayavi', min_version):
+        raise RuntimeError("Need mayavi >= %s" % min_version)
+
+
+@verbose
+def run_subprocess(command, verbose=None, *args, **kwargs):
+    """Run command using subprocess.Popen
+
+    Run the command and wait for it to complete. If the return code is zero,
+    return the output; otherwise raise CalledProcessError.
+    By default, this will also add stdout= and stderr=subprocess.PIPE
+    to the call to Popen to suppress printing to the terminal.
 
     Parameters
     ----------
-    min_version : str
-        The version string. Anything that matches
-        ``'(\\d+ | [a-z]+ | \\.)'``
-    """
-    this_version = LooseVersion(scipy.__version__)
-    return False if this_version < min_version else True
-
+    command : list of str
+        Command to run as subprocess (see subprocess.Popen documentation).
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+    *args, **kwargs : arguments
+        Additional arguments to pass to subprocess.Popen.
 
-def requires_scipy_version(min_version):
-    """Helper for testing"""
-    ok = check_scipy_version(min_version)
-    return np.testing.dec.skipif(not ok, 'Requires scipy version >= %s'
-                                 % min_version)
+    Returns
+    -------
+    stdout : str
+        Stdout returned by the process.
+    stderr : str
+        Stderr returned by the process.
+    """
+    for stdxxx, sys_stdxxx in (['stderr', sys.stderr],
+                               ['stdout', sys.stdout]):
+        if stdxxx not in kwargs:
+            kwargs[stdxxx] = subprocess.PIPE
+        elif kwargs[stdxxx] is sys_stdxxx:
+            if isinstance(sys_stdxxx, StringIO):
+                # nose monkey patches sys.stderr and sys.stdout to StringIO
+                kwargs[stdxxx] = subprocess.PIPE
+            else:
+                kwargs[stdxxx] = sys_stdxxx
 
+    # Check the PATH environment variable. If run_subprocess() is to be called
+    # frequently this should be refactored so as to only check the path once.
+    env = kwargs.get('env', os.environ)
+    if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
+        msg = ("Your PATH environment variable contains at least one path "
+               "starting with a tilde ('~') character. Such paths are not "
+               "interpreted correctly from within Python. It is recommended "
+               "that you use '$HOME' instead of '~'.")
+        warnings.warn(msg)
 
-def _check_pytables():
-    """Helper to error if Pytables is not found"""
+    logger.info("Running subprocess: %s" % ' '.join(command))
     try:
-        import tables as tb
-    except ImportError:
-        raise ImportError('pytables could not be imported')
-    return tb
+        p = subprocess.Popen(command, *args, **kwargs)
+    except Exception:
+        logger.error('Command not found: %s' % (command[0],))
+        raise
+    stdout_, stderr = p.communicate()
+    stdout_ = '' if stdout_ is None else stdout_.decode('utf-8')
+    stderr = '' if stderr is None else stderr.decode('utf-8')
 
+    if stdout_.strip():
+        logger.info("stdout:\n%s" % stdout_)
+    if stderr.strip():
+        logger.info("stderr:\n%s" % stderr)
 
-def requires_pytables():
-    """Helper for testing"""
-    have = True
-    try:
-        _check_pytables()
-    except ImportError:
-        have = False
-    return np.testing.dec.skipif(not have, 'Requires pytables')
+    output = (stdout_, stderr)
+    if p.returncode:
+        print(output)
+        err_fun = subprocess.CalledProcessError.__init__
+        if 'output' in inspect.getargspec(err_fun).args:
+            raise subprocess.CalledProcessError(p.returncode, command, output)
+        else:
+            raise subprocess.CalledProcessError(p.returncode, command)
+
+    return output
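
For example (a sketch; assumes a POSIX system with ``echo`` on the PATH):

    from mne.utils import run_subprocess

    stdout, stderr = run_subprocess(['echo', 'hello world'])
    assert stdout.strip() == 'hello world'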
 
 
 ###############################################################################
@@ -889,7 +837,7 @@ def set_log_level(verbose=None, return_old_level=False):
         INFO, WARNING, ERROR, or CRITICAL. Note that these are for
         convenience and are equivalent to passing in logging.DEBUG, etc.
         For bool, True is the same as 'INFO', False is the same as 'WARNING'.
-        If None, the environment variable MNE_LOG_LEVEL is read, and if
+        If None, the environment variable MNE_LOGGING_LEVEL is read, and if
         it doesn't exist, defaults to INFO.
     return_old_level : bool
         If True, return the old verbosity level.
@@ -906,7 +854,7 @@ def set_log_level(verbose=None, return_old_level=False):
         logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
                              WARNING=logging.WARNING, ERROR=logging.ERROR,
                              CRITICAL=logging.CRITICAL)
-        if not verbose in logging_types:
+        if verbose not in logging_types:
             raise ValueError('verbose must be of a valid type')
         verbose = logging_types[verbose]
     logger = logging.getLogger('mne')
@@ -925,7 +873,9 @@ def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
         To suppress log outputs, use set_log_level('WARN').
     output_format : str
         Format of the output messages. See the following for examples:
-            http://docs.python.org/dev/howto/logging.html
+
+            https://docs.python.org/dev/howto/logging.html
+
         e.g., "%(asctime)s - %(levelname)s - %(message)s".
     overwrite : bool, or None
         Overwrite the log file (if it exists). Otherwise, statements
@@ -983,8 +933,12 @@ def get_subjects_dir(subjects_dir=None, raise_error=False):
     return subjects_dir
 
 
+_temp_home_dir = None
+
+
 def _get_extra_data_path(home_dir=None):
     """Get path to extra data (config, tables, etc.)"""
+    global _temp_home_dir
     if home_dir is None:
         # this has been checked on OSX64, Linux64, and Win32
         if 'nt' == os.name.lower():
@@ -996,7 +950,14 @@ def _get_extra_data_path(home_dir=None):
             # of script that isn't launched via the command line (e.g. a script
             # launched via Upstart) then the HOME environment variable will
             # not be set.
-            home_dir = os.path.expanduser('~')
+            if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
+                if _temp_home_dir is None:
+                    _temp_home_dir = tempfile.mkdtemp()
+                    atexit.register(partial(shutil.rmtree, _temp_home_dir,
+                                            ignore_errors=True))
+                home_dir = _temp_home_dir
+            else:
+                home_dir = os.path.expanduser('~')
 
         if home_dir is None:
             raise ValueError('mne-python config file path could '
@@ -1073,21 +1034,24 @@ known_config_types = [
     'MNE_DATA',
     'MNE_DATASETS_MEGSIM_PATH',
     'MNE_DATASETS_SAMPLE_PATH',
+    'MNE_DATASETS_SOMATO_PATH',
     'MNE_DATASETS_SPM_FACE_PATH',
     'MNE_DATASETS_EEGBCI_PATH',
+    'MNE_DATASETS_BRAINSTORM_PATH',
+    'MNE_DATASETS_TESTING_PATH',
     'MNE_LOGGING_LEVEL',
     'MNE_USE_CUDA',
     'SUBJECTS_DIR',
     'MNE_CACHE_DIR',
     'MNE_MEMMAP_MIN_SIZE',
-    'MNE_SKIP_SAMPLE_DATASET_TESTS',
+    'MNE_SKIP_TESTING_DATASET_TESTS',
     'MNE_DATASETS_SPM_FACE_DATASETS_TESTS'
-    ]
+]
 
 # These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
 known_config_wildcards = [
     'MNE_STIM_CHANNEL',
-    ]
+]
 
 
 def get_config(key=None, default=None, raise_error=False, home_dir=None):
@@ -1112,10 +1076,14 @@ def get_config(key=None, default=None, raise_error=False, home_dir=None):
     -------
     value : dict | str | None
         The preference key value.
+
+    See Also
+    --------
+    set_config
     """
 
     if key is not None and not isinstance(key, string_types):
-        raise ValueError('key must be a string')
+        raise TypeError('key must be a string')
 
     # first, check to see if key is in env
     if key is not None and key in os.environ:
@@ -1161,14 +1129,18 @@ def set_config(key, value, home_dir=None):
     home_dir : str | None
         The folder that contains the .mne config folder.
         If None, it is found automatically.
+
+    See Also
+    --------
+    get_config
     """
     if not isinstance(key, string_types):
-        raise ValueError('key must be a string')
+        raise TypeError('key must be a string')
     # While JSON allow non-string types, we allow users to override config
     # settings using env, which are strings, so we enforce that here
     if not isinstance(value, string_types) and value is not None:
-        raise ValueError('value must be a string or None')
-    if not key in known_config_types and not \
+        raise TypeError('value must be a string or None')
+    if key not in known_config_types and not \
             any(k in key for k in known_config_wildcards):
         warnings.warn('Setting non-standard config type: "%s"' % key)
 
@@ -1237,7 +1209,7 @@ class ProgressBar(object):
     template = '\r[{0}{1}] {2:.05f} {3} {4}   '
 
     def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,
-                 progress_character='.', spinner=False):
+                 progress_character='.', spinner=False, verbose_bool=True):
         self.cur_value = initial_value
         self.max_value = float(max_value)
         self.mesg = mesg
@@ -1246,6 +1218,7 @@ class ProgressBar(object):
         self.spinner = spinner
         self.spinner_index = 0
         self.n_spinner = len(self.spinner_symbols)
+        self._do_print = verbose_bool
 
     def update(self, cur_value, mesg=None):
         """Update progressbar with current value of process
@@ -1264,7 +1237,7 @@ class ProgressBar(object):
         # Ensure floating-point division so we can get fractions of a percent
         # for the progressbar.
         self.cur_value = cur_value
-        progress = float(self.cur_value) / self.max_value
+        progress = min(float(self.cur_value) / self.max_value, 1.)
         num_chars = int(progress * self.max_chars)
         num_left = self.max_chars - num_chars
 
@@ -1280,15 +1253,15 @@ class ProgressBar(object):
                                    progress * 100,
                                    self.spinner_symbols[self.spinner_index],
                                    self.mesg)
-        sys.stdout.write(bar)
+        # Force a flush because sometimes when using bash scripts and pipes,
+        # the output is not printed until after the program exits.
+        if self._do_print:
+            sys.stdout.write(bar)
+            sys.stdout.flush()
         # Increment the spinner
         if self.spinner:
             self.spinner_index = (self.spinner_index + 1) % self.n_spinner
 
-        # Force a flush because sometimes when using bash scripts and pipes,
-        # the output is not printed until after the program exits.
-        sys.stdout.flush()
-
     def update_with_increment_value(self, increment_value, mesg=None):
         """Update progressbar with the value of the increment instead of the
         current value of process as in update()
@@ -1308,21 +1281,7 @@ class ProgressBar(object):
         self.update(self.cur_value, mesg)
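
A small usage sketch of the class as it stands after this change:

    import time
    from mne.utils import ProgressBar

    pbar = ProgressBar(50, mesg='crunching', spinner=True)
    for ii in range(50):
        time.sleep(0.02)  # stand-in for real work
        pbar.update_with_increment_value(1)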
 
 
-class _HTTPResumeURLOpener(urllib.request.FancyURLopener):
-    """Create sub-class in order to overide error 206.
-
-    This error means a partial file is being sent, which is ok in this case.
-    Do nothing with this error.
-    """
-    # Adapted from:
-    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
-    # http://code.activestate.com/recipes/83208-resuming-download-of-a-file/
-
-    def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
-        pass
-
-
-def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
+def _chunk_read(response, local_file, initial_size=0, verbose_bool=True):
     """Download a file chunk by chunk and show advancement
 
     Can also be used when resuming downloads over http.
@@ -1333,32 +1292,43 @@ def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
         Response to the download request in order to get file size.
     local_file: file
         Hard disk file where data should be written.
-    chunk_size: integer, optional
-        Size of downloaded chunks. Default: 8192
     initial_size: int, optional
         If resuming, indicate the initial size of the file.
+
+    Notes
+    -----
+    The chunk size will be automatically adapted based on the connection
+    speed.
     """
     # Adapted from NISL:
     # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
 
-    bytes_so_far = initial_size
     # Returns only amount left to download when resuming, not the size of the
     # entire file
-    total_size = int(response.headers['Content-Length'].strip())
+    total_size = int(response.headers.get('Content-Length', '1').strip())
     total_size += initial_size
 
-    progress = ProgressBar(total_size, initial_value=bytes_so_far,
-                           max_chars=40, spinner=True, mesg='downloading')
+    progress = ProgressBar(total_size, initial_value=initial_size,
+                           max_chars=40, spinner=True, mesg='downloading',
+                           verbose_bool=verbose_bool)
+    chunk_size = 8192  # 2 ** 13
     while True:
+        t0 = time.time()
         chunk = response.read(chunk_size)
-        bytes_so_far += len(chunk)
+        dt = time.time() - t0
+        if dt < 0.001:
+            chunk_size *= 2
+        elif dt > 0.5 and chunk_size > 8192:
+            chunk_size = chunk_size // 2
         if not chunk:
-            sys.stderr.write('\n')
+            if verbose_bool:
+                sys.stdout.write('\n')
+                sys.stdout.flush()
             break
         _chunk_write(chunk, local_file, progress)
 
 
-def _chunk_read_ftp_resume(url, temp_file_name, local_file):
+def _chunk_read_ftp_resume(url, temp_file_name, local_file, verbose_bool=True):
     """Resume downloading of a file from an FTP server"""
     # Adapted from: https://pypi.python.org/pypi/fileDownloader.py
     # but with changes
@@ -1382,12 +1352,17 @@ def _chunk_read_ftp_resume(url, temp_file_name, local_file):
     down_cmd = "RETR " + file_name
     file_size = data.size(file_name)
     progress = ProgressBar(file_size, initial_value=local_file_size,
-                           max_chars=40, spinner=True, mesg='downloading')
+                           max_chars=40, spinner=True, mesg='downloading',
+                           verbose_bool=verbose_bool)
+
     # Callback lambda function that will be passed the downloaded data
     # chunk and will write it to file and update the progress bar
-    chunk_write = lambda chunk: _chunk_write(chunk, local_file, progress)
+    def chunk_write(chunk):
+        return _chunk_write(chunk, local_file, progress)
     data.retrbinary(down_cmd, chunk_write)
     data.close()
+    sys.stdout.write('\n')
+    sys.stdout.flush()
 
 
 def _chunk_write(chunk, local_file, progress):
@@ -1396,7 +1371,9 @@ def _chunk_write(chunk, local_file, progress):
     progress.update_with_increment_value(len(chunk))
 
 
-def _fetch_file(url, file_name, print_destination=True, resume=True):
+@verbose
+def _fetch_file(url, file_name, print_destination=True, resume=True,
+                hash_=None, verbose=None):
     """Load requested file, downloading it if needed or requested
 
     Parameters
@@ -1410,58 +1387,83 @@ def _fetch_file(url, file_name, print_destination=True, resume=True):
         download finishes.
     resume: bool, optional
         If true, try to resume partially downloaded files.
+    hash_ : str | None
+        The hash of the file to check. If None, no checking is
+        performed.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
     """
     # Adapted from NISL:
     # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
-
+    if hash_ is not None and (not isinstance(hash_, string_types) or
+                              len(hash_) != 32):
+        raise ValueError('Bad hash value given, should be a 32-character '
+                         'string:\n%s' % (hash_,))
     temp_file_name = file_name + ".part"
     local_file = None
     initial_size = 0
+    verbose_bool = (logger.level <= 20)  # 20 is info
     try:
         # Checking file size and displaying it alongside the download url
-        u = urllib.request.urlopen(url)
+        u = urllib.request.urlopen(url, timeout=10.)
         try:
-            file_size = int(u.headers['Content-Length'].strip())
+            file_size = int(u.headers.get('Content-Length', '1').strip())
         finally:
+            u.close()
             del u
-        print('Downloading data from %s (%s)' % (url, sizeof_fmt(file_size)))
+        logger.info('Downloading data from %s (%s)\n'
+                    % (url, sizeof_fmt(file_size)))
         # Downloading data
         if resume and os.path.exists(temp_file_name):
             local_file = open(temp_file_name, "ab")
             # Resuming HTTP and FTP downloads requires different procedures
             scheme = urllib.parse.urlparse(url).scheme
-            if scheme == 'http':
-                url_opener = _HTTPResumeURLOpener()
+            if scheme in ('http', 'https'):
                 local_file_size = os.path.getsize(temp_file_name)
                 # If the file exists, then only download the remainder
-                url_opener.addheader("Range", "bytes=%s-" % (local_file_size))
+                req = urllib.request.Request(url)
+                req.headers["Range"] = "bytes=%s-" % local_file_size
                 try:
-                    data = url_opener.open(url)
-                except urllib.request.HTTPError:
+                    data = urllib.request.urlopen(req)
+                except Exception:
                     # There is a problem that may be due to resuming, some
                     # servers may not support the "Range" header. Switch back
                     # to complete download method
-                    print('Resuming download failed. Attempting to restart '
-                          'downloading the entire file.')
-                    _fetch_file(url, resume=False)
+                    logger.info('Resuming download failed. Attempting to '
+                                'restart downloading the entire file.')
+                    local_file.close()
+                    _fetch_file(url, file_name, resume=False)
                 else:
-                    _chunk_read(data, local_file, initial_size=local_file_size)
+                    _chunk_read(data, local_file, initial_size=local_file_size,
+                                verbose_bool=verbose_bool)
+                    data.close()
                     del data  # should auto-close
             else:
-                _chunk_read_ftp_resume(url, temp_file_name, local_file)
+                _chunk_read_ftp_resume(url, temp_file_name, local_file,
+                                       verbose_bool=verbose_bool)
         else:
             local_file = open(temp_file_name, "wb")
             data = urllib.request.urlopen(url)
             try:
-                _chunk_read(data, local_file, initial_size=initial_size)
+                _chunk_read(data, local_file, initial_size=initial_size,
+                            verbose_bool=verbose_bool)
             finally:
+                data.close()
                 del data  # should auto-close
         # temp file must be closed prior to the move
         if not local_file.closed:
             local_file.close()
+        # check md5sum
+        if hash_ is not None:
+            logger.info('Verifying download hash.')
+            md5 = md5sum(temp_file_name)
+            if hash_ != md5:
+                raise RuntimeError('Hash mismatch for downloaded file %s, '
+                                   'expected %s but got %s'
+                                   % (temp_file_name, hash_, md5))
         shutil.move(temp_file_name, file_name)
         if print_destination is True:
-            stdout.write('File saved as %s.\n' % file_name)
+            logger.info('File saved as %s.\n' % file_name)
     except Exception as e:
         logger.error('Error while fetching file %s.'
                      ' Dataset fetching aborted.' % url)
@@ -1502,27 +1504,55 @@ def _url_to_local_path(url, path):
     return destination
 
 
-def _get_stim_channel(stim_channel):
-    """Helper to determine the appropriate stim_channel"""
+def _get_stim_channel(stim_channel, info):
+    """Helper to determine the appropriate stim_channel
+
+    First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
+    are read. If these are not found, it will fall back to 'STI 014' if
+    present, and finally to any channels of type 'stim', if present.
+
+    Parameters
+    ----------
+    stim_channel : str | list of str | None
+        The stim channel selected by the user.
+    info : instance of Info
+        An information structure containing information about the channels.
+
+    Returns
+    -------
+    stim_channel : str | list of str
+        The name of the stim channel(s) to use.
+    """
     if stim_channel is not None:
         if not isinstance(stim_channel, list):
             if not isinstance(stim_channel, string_types):
-                raise ValueError('stim_channel must be a str, list, or None')
+                raise TypeError('stim_channel must be a str, list, or None')
             stim_channel = [stim_channel]
-        if not all([isinstance(s, string_types) for s in stim_channel]):
-            raise ValueError('stim_channel list must contain all strings')
+        if not all(isinstance(s, string_types) for s in stim_channel):
+            raise TypeError('stim_channel list must contain all strings')
         return stim_channel
 
     stim_channel = list()
     ch_count = 0
     ch = get_config('MNE_STIM_CHANNEL')
-    while(ch is not None):
+    while ch is not None and ch in info['ch_names']:
         stim_channel.append(ch)
         ch_count += 1
         ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
-    if ch_count == 0:
-        stim_channel = ['STI 014']
-    return stim_channel
+    if ch_count > 0:
+        return stim_channel
+
+    if 'STI 014' in info['ch_names']:
+        return ['STI 014']
+
+    from .io.pick import pick_types
+    stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
+    if len(stim_channel) > 0:
+        stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
+        return stim_channel
+
+    raise ValueError("No stim channels found. Consider specifying them "
+                     "manually using the 'stim_channel' parameter.")
 
 
 def _check_fname(fname, overwrite):
@@ -1571,7 +1601,7 @@ def _check_pandas_index_arguments(index, defaults):
     """ Helper function to check pandas index arguments """
     if not any(isinstance(index, k) for k in (list, tuple)):
         index = [index]
-    invalid_choices = [e for e in index if not e in defaults]
+    invalid_choices = [e for e in index if e not in defaults]
     if invalid_choices:
         options = [', '.join(e) for e in [invalid_choices, defaults]]
         raise ValueError('[%s] is not a valid option. Valid index'
@@ -1601,6 +1631,8 @@ def _clean_names(names, remove_whitespace=False, before_dash=True):
             name = name.replace(' ', '')
         if '-' in name and before_dash:
             name = name.split('-')[0]
+        if name.endswith('_virtual'):
+            name = name[:-8]
         cleaned.append(name)
 
     return cleaned
@@ -1614,6 +1646,14 @@ def clean_warning_registry():
     for mod in list(sys.modules.values()):
         if mod.__class__.__name__ not in bad_names and hasattr(mod, reg):
             getattr(mod, reg).clear()
+    # hack to deal with old scipy/numpy in tests
+    if os.getenv('TRAVIS') == 'true' and sys.version.startswith('2.6'):
+        warnings.simplefilter('default')
+        try:
+            np.rank([])
+        except Exception:
+            pass
+        warnings.simplefilter('always')
 
 
 def _check_type_picks(picks):
@@ -1622,7 +1662,7 @@ def _check_type_picks(picks):
     if picks is None:
         pass
     elif isinstance(picks, list):
-        if not all([isinstance(i, int) for i in picks]):
+        if not all(isinstance(i, int) for i in picks):
             raise ValueError(err_msg)
         picks = np.array(picks)
     elif isinstance(picks, np.ndarray):
@@ -1631,3 +1671,222 @@ def _check_type_picks(picks):
     else:
         raise ValueError(err_msg)
     return picks
+
+
+@nottest
+def run_tests_if_main(measure_mem=False):
+    """Run tests in a given file if it is run as a script"""
+    local_vars = inspect.currentframe().f_back.f_locals
+    if not local_vars.get('__name__', '') == '__main__':
+        return
+    # we are in a "__main__"
+    try:
+        import faulthandler
+        faulthandler.enable()
+    except Exception:
+        pass
+    with warnings.catch_warnings(record=True):  # memory_usage internal dep.
+        mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
+    if mem >= 0:
+        print('Memory consumption after import: %s' % mem)
+    t0 = time.time()
+    peak_mem, peak_name = mem, 'import'
+    max_elapsed, elapsed_name = 0, 'N/A'
+    count = 0
+    for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
+        val = local_vars[name]
+        if name.startswith('_'):
+            continue
+        elif callable(val) and name.startswith('test'):
+            count += 1
+            doc = val.__doc__.strip() if val.__doc__ else name
+            sys.stdout.write('%s ... ' % doc)
+            sys.stdout.flush()
+            try:
+                t1 = time.time()
+                if measure_mem:
+                    with warnings.catch_warnings(record=True):  # dep warn
+                        mem = int(round(max(memory_usage((val, (), {})))))
+                else:
+                    val()
+                    mem = -1
+                if mem >= peak_mem:
+                    peak_mem, peak_name = mem, name
+                mem = (', mem: %s MB' % mem) if mem >= 0 else ''
+                elapsed = int(round(time.time() - t1))
+                if elapsed >= max_elapsed:
+                    max_elapsed, elapsed_name = elapsed, name
+                sys.stdout.write('time: %s sec%s\n' % (elapsed, mem))
+                sys.stdout.flush()
+            except Exception as err:
+                if 'skiptest' in err.__class__.__name__.lower():
+                    sys.stdout.write('SKIP (%s)\n' % str(err))
+                    sys.stdout.flush()
+                else:
+                    raise
+    elapsed = int(round(time.time() - t0))
+    sys.stdout.write('Total: %s tests\n• %s sec (%s sec for %s)\n• Peak memory'
+                     ' %s MB (%s)\n' % (count, elapsed, max_elapsed,
+                                        elapsed_name, peak_mem, peak_name))
+
+
+class ArgvSetter(object):
+    """Temporarily set sys.argv"""
+    def __init__(self, args=(), disable_stdout=True, disable_stderr=True):
+        self.argv = list(('python',) + args)
+        self.stdout = StringIO() if disable_stdout else sys.stdout
+        self.stderr = StringIO() if disable_stderr else sys.stderr
+
+    def __enter__(self):
+        self.orig_argv = sys.argv
+        sys.argv = self.argv
+        self.orig_stdout = sys.stdout
+        sys.stdout = self.stdout
+        self.orig_stderr = sys.stderr
+        sys.stderr = self.stderr
+        return self
+
+    def __exit__(self, *args):
+        sys.argv = self.orig_argv
+        sys.stdout = self.orig_stdout
+        sys.stderr = self.orig_stderr
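
For example (with a hypothetical CLI entry point ``main``):

    import sys
    from mne.utils import ArgvSetter

    def main():
        print('args were: %s' % sys.argv[1:])

    with ArgvSetter(('--foo', 'bar')) as setter:
        main()
    print(setter.stdout.getvalue())  # args were: ['--foo', 'bar']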
+
+
+def md5sum(fname, block_size=1048576):  # 2 ** 20
+    """Calculate the md5sum for a file
+
+    Parameters
+    ----------
+    fname : str
+        Filename.
+    block_size : int
+        Block size to use when reading.
+
+    Returns
+    -------
+    hash_ : str
+        The hexadecimal digest of the hash.
+    """
+    md5 = hashlib.md5()
+    with open(fname, 'rb') as fid:
+        while True:
+            data = fid.read(block_size)
+            if not data:
+                break
+            md5.update(data)
+    return md5.hexdigest()
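
For example (the path is hypothetical; any local file works):

    from mne.utils import md5sum

    digest = md5sum('/tmp/example.bin')
    assert len(digest) == 32  # hex digest of the MD5 hash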
+
+
+def _sphere_to_cartesian(theta, phi, r):
+    """Transform spherical coordinates to cartesian"""
+    z = r * np.sin(phi)
+    rcos_phi = r * np.cos(phi)
+    x = rcos_phi * np.cos(theta)
+    y = rcos_phi * np.sin(theta)
+    return x, y, z
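
A quick check of the convention used (theta is the azimuth and phi the
elevation, both in radians):

    from mne.utils import _sphere_to_cartesian

    x, y, z = _sphere_to_cartesian(theta=0.0, phi=0.0, r=1.0)
    # -> (1.0, 0.0, 0.0), a point on the x-axis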
+
+
+def create_slices(start, stop, step=None, length=1):
+    """ Generate slices of time indexes
+
+    Parameters
+    ----------
+    start : int
+        Index where first slice should start.
+    stop : int
+        Index where last slice should maximally end.
+    step : int | None
+        Number of time samples separating two slices.
+        If step is None, step is set to length.
+    length : int
+        Number of time samples included in a given slice.
+
+    Returns
+    -------
+    slices : list
+        List of slice objects.
+    """
+
+    # default parameters
+    if step is None:
+        step = length
+
+    # slicing
+    slices = [slice(t, t + length, 1) for t in
+              range(start, stop - length + 1, step)]
+    return slices
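
Doctest-style examples of the two stepping modes:

    >>> create_slices(0, 10, length=3)
    [slice(0, 3, 1), slice(3, 6, 1), slice(6, 9, 1)]
    >>> create_slices(0, 10, step=2, length=3)
    [slice(0, 3, 1), slice(2, 5, 1), slice(4, 7, 1), slice(6, 9, 1)]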
+
+
+def _time_mask(times, tmin=None, tmax=None, strict=False):
+    """Helper to safely find sample boundaries"""
+    tmin = -np.inf if tmin is None else tmin
+    tmax = np.inf if tmax is None else tmax
+    mask = (times >= tmin)
+    mask &= (times <= tmax)
+    if not strict:
+        mask |= isclose(times, tmin)
+        mask |= isclose(times, tmax)
+    return mask
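
For example (note that the boundary samples are kept via isclose()):

    import numpy as np
    from mne.utils import _time_mask

    times = np.arange(0., 1., 0.1)
    print(times[_time_mask(times, tmin=0.2, tmax=0.5)])
    # -> [ 0.2  0.3  0.4  0.5]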
+
+
+def _get_fast_dot():
+    """"Helper to get fast dot"""
+    try:
+        from sklearn.utils.extmath import fast_dot
+    except ImportError:
+        fast_dot = np.dot
+    return fast_dot
+
+
+def random_permutation(n_samples, random_state=None):
+    """Helper to emulate the randperm matlab function.
+
+    It returns a vector containing a random permutation of the
+    integers between 0 and n_samples-1. It returns the same random numbers
+    as the MATLAB randperm function whenever random_state matches
+    MATLAB's random seed.
+
+    This function is useful for comparing against matlab scripts
+    which use the randperm function.
+
+    Note: the MATLAB function randperm(n_samples) generates a random
+    sequence between 1 and n_samples, whereas
+    random_permutation(n_samples, random_state) generates
+    a random sequence between 0 and n_samples-1, that is:
+    randperm(n_samples) = random_permutation(n_samples, random_state) + 1
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of integers to permute; the permuted sequence contains the
+        integers 0 through n_samples - 1.
+    random_state : int | None
+        Random seed for initializing the pseudo-random number generator.
+
+    Returns
+    -------
+    randperm : ndarray, int
+        Randomly permuted sequence between 0 and n-1.
+    """
+    rng = check_random_state(random_state)
+    idx = rng.rand(n_samples)
+
+    randperm = np.argsort(idx)
+
+    return randperm
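
For example:

    from mne.utils import random_permutation

    perm = random_permutation(5, random_state=0)
    print(perm)  # [4 3 0 2 1] with numpy's seed-0 generator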
+
+
+def compute_corr(x, y):
+    """Compute pearson correlations between a vector and a matrix"""
+    if len(x) == 0 or len(y) == 0:
+        raise ValueError('x or y has zero length')
+    fast_dot = _get_fast_dot()
+    X = np.array(x, float)
+    Y = np.array(y, float)
+    X -= X.mean(0)
+    Y -= Y.mean(0)
+    x_sd = X.std(0, ddof=1)
+    # if covariance matrix is fully expanded, Y needs a
+    # transpose / broadcasting else Y is correct
+    y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
+    return (fast_dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
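
For example, agreeing with np.corrcoef column by column:

    import numpy as np
    from mne.utils import compute_corr

    rng = np.random.RandomState(0)
    x, y = rng.randn(100), rng.randn(100, 3)
    r = compute_corr(x, y)
    expected = [np.corrcoef(x, y[:, ii])[0, 1] for ii in range(3)]
    assert np.allclose(r, expected)
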
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
index 6e06884..044bbe9 100644
--- a/mne/viz/_3d.py
+++ b/mne/viz/_3d.py
@@ -7,26 +7,32 @@ from __future__ import print_function
 #          Martin Luessi <mluessi@nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d@gmail.com>
 #          Mainak Jas <mainak@neuro.hut.fi>
+#          Mark Wronkiewicz <wronk.mark@gmail.com>
 #
 # License: Simplified BSD
 
 from ..externals.six import string_types, advance_iterator
 
-from distutils.version import LooseVersion
-
-import os
+import os.path as op
 import inspect
 import warnings
 from itertools import cycle
+import base64
 
 import numpy as np
 from scipy import linalg
 
 from ..io.pick import pick_types
-from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
-from ..transforms import read_trans, _find_trans, apply_trans
-from ..utils import get_subjects_dir, logger, _check_subject
+from ..io.constants import FIFF
+from ..surface import (get_head_surf, get_meg_helmet_surf, read_surface,
+                       transform_surface_to)
+from ..transforms import (read_trans, _find_trans, apply_trans,
+                          combine_transforms, _get_mri_head_t, _ensure_trans,
+                          invert_transform)
+from ..utils import get_subjects_dir, logger, _check_subject, verbose
+from ..defaults import _handle_default
 from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
+from ..externals.six import BytesIO
 
 
 def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
@@ -147,7 +153,7 @@ def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
 
 
 def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
-                       slices=None, show=True):
+                       slices=None, show=True, img_output=None):
     """Plot BEM contours on anatomical slices.
 
     Parameters
@@ -163,11 +169,17 @@ def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
         Slice indices.
     show : bool
         Call pyplot.show() at the end.
+    img_output : None | tuple
+        If tuple (width and height), images will be produced instead of a
+        single figure with many axes. This mode is designed to reduce the
+        (substantial) overhead associated with making tens to hundreds
+        of matplotlib axes, instead opting to re-use a single Axes instance.
 
     Returns
     -------
-    fig : Instance of matplotlib.figure.Figure
-        The figure.
+    fig : Instance of matplotlib.figure.Figure | list
+        The figure. Will instead be a list of png images if
+        img_output is a tuple.
     """
     import matplotlib.pyplot as plt
     import nibabel as nib
@@ -203,10 +215,22 @@ def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
         surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
         surfs.append(surf)
 
-    fig, axs = _prepare_trellis(len(slices), 4)
-
+    if img_output is None:
+        fig, axs = _prepare_trellis(len(slices), 4)
+    else:
+        fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
+        axs = [ax] * len(slices)
+
+        fig_size = fig.get_size_inches()
+        w, h = img_output[0], img_output[1]
+        w2 = fig_size[0]
+        fig.set_size_inches([(w2 / float(w)) * w, (w2 / float(w)) * h])
+        plt.close(fig)
+
+    inds = dict(coronal=[0, 1, 2], axial=[2, 0, 1],
+                sagittal=[2, 1, 0])[orientation]
+    outs = []
     for ax, sl in zip(axs, slices):
-
         # adjust the orientations for good view
         if orientation == 'coronal':
             dat = data[:, :, sl].transpose()
@@ -216,43 +240,46 @@ def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
             dat = data[sl, :, :]
 
         # First plot the anatomical data
+        if img_output is not None:
+            ax.clear()
         ax.imshow(dat, cmap=plt.cm.gray)
         ax.axis('off')
 
         # and then plot the contours on top
         for surf in surfs:
-            if orientation == 'coronal':
-                ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
-                              surf['tris'], surf['rr'][:, 2],
-                              levels=[sl], colors='yellow', linewidths=2.0)
-            elif orientation == 'axial':
-                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
-                              surf['tris'], surf['rr'][:, 1],
-                              levels=[sl], colors='yellow', linewidths=2.0)
-            elif orientation == 'sagittal':
-                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
-                              surf['tris'], surf['rr'][:, 0],
-                              levels=[sl], colors='yellow', linewidths=2.0)
-
+            ax.tricontour(surf['rr'][:, inds[0]], surf['rr'][:, inds[1]],
+                          surf['tris'], surf['rr'][:, inds[2]],
+                          levels=[sl], colors='yellow', linewidths=2.0)
+        if img_output is not None:
+            ax.set_xticks([])
+            ax.set_yticks([])
+            ax.set_xlim(0, img_output[1])
+            ax.set_ylim(img_output[0], 0)
+            output = BytesIO()
+            fig.savefig(output, bbox_inches='tight',
+                        pad_inches=0, format='png')
+            outs.append(base64.b64encode(output.getvalue()).decode('ascii'))
     if show:
         plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                             hspace=0.)
         plt.show()
 
-    return fig
+    return fig if img_output is None else outs
 
 
-def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
-               ch_type=None, source='bem'):
+@verbose
+def plot_trans(info, trans='auto', subject=None, subjects_dir=None,
+               ch_type=None, source=('bem', 'head'), coord_frame='head',
+               meg_sensors=False, dig=False, verbose=None):
     """Plot MEG/EEG head surface and helmet in 3D.
 
     Parameters
     ----------
     info : dict
         The measurement info.
-    trans_fname : str | 'auto'
-        The full path to the `*-trans.fif` file produced during
-        coregistration.
+    trans : str | 'auto' | dict
+        The full path to the head<->MRI transform ``*-trans.fif`` file
+        produced during coregistration.
     subject : str | None
         The subject name corresponding to FreeSurfer environment
         variable SUBJECT.
@@ -268,76 +295,213 @@ def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
         try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
         then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
         to 'bem'. Note. For single layer bems it is recommended to use 'head'.
+    coord_frame : str
+        Coordinate frame to use, 'head', 'meg', or 'mri'.
+    meg_sensors : bool
+        If True, plot MEG sensors as points in addition to showing the helmet.
+    dig : bool
+        If True, plot the digitization points.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
 
     Returns
     -------
     fig : instance of mlab.Figure
         The mayavi figure.
     """
-
+    if coord_frame not in ['head', 'meg', 'mri']:
+        raise ValueError('coord_frame must be "head", "meg" or "mri"')
     if ch_type not in [None, 'eeg', 'meg']:
         raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
                          % ch_type)
 
-    if trans_fname == 'auto':
-        # let's try to do this in MRI coordinates so they're easy to plot
-        trans_fname = _find_trans(subject, subjects_dir)
-
-    trans = read_trans(trans_fname)
-
+    if isinstance(trans, string_types):
+        if trans == 'auto':
+            # let's try to do this in MRI coordinates so they're easy to plot
+            trans = _find_trans(subject, subjects_dir)
+        trans = read_trans(trans)
+    elif not isinstance(trans, dict):
+        raise TypeError('trans must be str or dict')
+    head_mri_t = _ensure_trans(trans, 'head', 'mri')
+    del trans
+
+    # both the head and helmet will be in MRI coordinates after this
     surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
     if ch_type is None or ch_type == 'meg':
-        surfs.append(get_meg_helmet_surf(info, trans))
-
-    # Plot them
+        surfs.append(get_meg_helmet_surf(info, head_mri_t))
+    if coord_frame == 'meg':
+        surf_trans = combine_transforms(info['dev_head_t'], head_mri_t,
+                                        'meg', 'mri')
+    elif coord_frame == 'head':
+        surf_trans = head_mri_t
+    else:  # coord_frame == 'mri'
+        surf_trans = None
+    surfs = [transform_surface_to(surf, coord_frame, surf_trans)
+             for surf in surfs]
+    del surf_trans
+
+    # determine points
+    eeg_loc = list()  # initialize all point sets so the plotting loop is safe
+    meg_loc = list()
+    ext_loc = list()
+    car_loc = list()
+    if ch_type is None or ch_type == 'eeg':
+        eeg_loc = np.array([info['chs'][k]['loc'][:3]
+                           for k in pick_types(info, meg=False, eeg=True)])
+        if len(eeg_loc) > 0:
+            # Transform EEG electrodes from head coordinates if necessary
+            if coord_frame == 'meg':
+                eeg_loc = apply_trans(invert_transform(info['dev_head_t']),
+                                      eeg_loc)
+            elif coord_frame == 'mri':
+                eeg_loc = apply_trans(invert_transform(head_mri_t), eeg_loc)
+        else:
+            # only warn if EEG explicitly requested, or EEG channels exist but
+            # no locations are provided
+            if (ch_type is not None or
+                    len(pick_types(info, meg=False, eeg=True)) > 0):
+                warnings.warn('EEG electrode locations not found. '
+                              'Cannot plot EEG electrodes.')
+    if meg_sensors:
+        meg_loc = np.array([info['chs'][k]['loc'][:3]
+                           for k in pick_types(info)])
+        if len(meg_loc) > 0:
+            # Transform MEG sensors out of device coordinates if necessary
+            if coord_frame == 'head':
+                meg_loc = apply_trans(info['dev_head_t'], meg_loc)
+            elif coord_frame == 'mri':
+                t = combine_transforms(info['dev_head_t'], head_mri_t,
+                                       'meg', 'mri')
+                meg_loc = apply_trans(t, meg_loc)
+        else:
+            warnings.warn('MEG sensors not found. '
+                          'Cannot plot MEG locations.')
+    if dig:
+        ext_loc = np.array([d['r'] for d in info['dig']
+                           if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
+        car_loc = np.array([d['r'] for d in info['dig']
+                            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL])
+        if coord_frame == 'meg':
+            t = invert_transform(info['dev_head_t'])
+            ext_loc = apply_trans(t, ext_loc)
+            car_loc = apply_trans(t, car_loc)
+        elif coord_frame == 'mri':
+            ext_loc = apply_trans(head_mri_t, ext_loc)
+            car_loc = apply_trans(head_mri_t, car_loc)
+        if len(car_loc) == len(ext_loc) == 0:
+            warnings.warn('Digitization points not found. '
+                          'Cannot plot digitization.')
+
+    # do the plotting, surfaces then points
     from mayavi import mlab
-    alphas = [1.0, 0.5]
-    colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
-
     fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
 
-    for ii, surf in enumerate(surfs):
-
+    alphas = [1.0, 0.5]  # head, helmet
+    colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
+    for surf, alpha, color in zip(surfs, alphas, colors):
         x, y, z = surf['rr'].T
         nn = surf['nn']
         # make absolutely sure these are normalized for Mayavi
         nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
 
         # Make a solid surface
-        alpha = alphas[ii]
         with warnings.catch_warnings(record=True):  # traits
             mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
         mesh.data.point_data.normals = nn
         mesh.data.cell_data.normals = None
-        mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
+        mlab.pipeline.surface(mesh, color=color, opacity=alpha)
+
+    datas = (eeg_loc, meg_loc, car_loc, ext_loc)
+    colors = ((1., 0., 0.), (0., 0.25, 0.5), (1., 1., 0.), (1., 0.5, 0.))
+    alphas = (1.0, 0.25, 0.5, 0.25)
+    scales = (0.005, 0.0025, 0.015, 0.0075)
+    for data, color, alpha, scale in zip(datas, colors, alphas, scales):
+        if len(data) > 0:
+            with warnings.catch_warnings(record=True):  # traits
+                mlab.points3d(data[:, 0], data[:, 1], data[:, 2],
+                              color=color, scale_factor=scale, opacity=alpha)
+    mlab.view(90, 90)
+    return fig
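
For orientation, a minimal usage sketch of the updated signature; the file
names are illustrative, not part of the patch:

    import mne
    raw = mne.io.Raw('sample_audvis_raw.fif')  # hypothetical file
    fig = mne.viz.plot_trans(raw.info, trans='sample-trans.fif',
                             subject='sample', coord_frame='mri',
                             meg_sensors=True, dig=True)
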
 
-    if ch_type is None or ch_type == 'eeg':
-        eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
-                    if l['eeg_loc'] is not None]
 
-        if len(eeg_locs) > 0:
-            eeg_loc = np.array(eeg_locs)
+def _limits_to_control_points(clim, stc_data, colormap):
+    """Private helper function to convert limits (values or percentiles)
+    to control points.
 
-            # Transform EEG electrodes to MRI coordinates
-            eeg_loc = apply_trans(trans['trans'], eeg_loc)
+    Note: If using 'mne', generate cmap control points for a directly
+    mirrored cmap for simplicity (i.e., no normalization is computed to account
+    for a 2-tailed mne cmap).
 
-            with warnings.catch_warnings(record=True):  # traits
-                mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
-                              color=(1.0, 0.0, 0.0), scale_factor=0.005)
-        else:
-            warnings.warn('EEG electrode locations not found. '
-                          'Cannot plot EEG electrodes.')
+    Parameters
+    ----------
+    clim : str | dict
+        Desired limits used to set cmap control points.
 
-    mlab.view(90, 90)
-    return fig
+    Returns
+    -------
+    ctrl_pts : ndarray, shape (3,)
+        Values to use as cmap control points.
+    colormap : str
+        The colormap.
+    """
+
+    # Based on type of limits specified, get cmap control points
+    if colormap == 'auto':
+        if clim == 'auto':
+            colormap = 'mne' if (stc_data < 0).any() else 'hot'
+        else:
+            if 'lims' in clim:
+                colormap = 'hot'
+            else:  # 'pos_lims' in clim
+                colormap = 'mne'
+    if clim == 'auto':
+        # use percentiles of the absolute data as the three control points
+        ctrl_pts = np.percentile(np.abs(stc_data), [96, 97.5, 99.95])
+    elif isinstance(clim, dict):
+        # Get appropriate key for clim if it's a dict
+        limit_key = ['lims', 'pos_lims'][colormap in ('mne', 'mne_analyze')]
+        if colormap != 'auto' and limit_key not in clim.keys():
+            raise KeyError('"pos_lims" must be used with "mne" colormap')
+        clim['kind'] = clim.get('kind', 'percent')
+        if clim['kind'] == 'percent':
+            ctrl_pts = np.percentile(np.abs(stc_data),
+                                     list(np.abs(clim[limit_key])))
+        elif clim['kind'] == 'value':
+            ctrl_pts = np.array(clim[limit_key])
+            if (np.diff(ctrl_pts) < 0).any():
+                raise ValueError('value colormap limits must be '
+                                 'nondecreasing')
+        else:
+            raise ValueError('If clim is a dict, clim["kind"] must be '
+                             '"value" or "percent"')
+    else:
+        raise ValueError('"clim" must be "auto" or dict')
+    if len(ctrl_pts) != 3:
+        raise ValueError('"lims" or "pos_lims" is length %i. It must be length'
+                         ' 3' % len(ctrl_pts))
+    ctrl_pts = np.array(ctrl_pts, float)
+    if len(set(ctrl_pts)) != 3:
+        if len(set(ctrl_pts)) == 1:  # three points match
+            if ctrl_pts[0] == 0:  # all are zero
+                warnings.warn('All data were zero')
+                ctrl_pts = np.arange(3, dtype=float)
+            else:
+                ctrl_pts *= [0., 0.5, 1]  # all nonzero pts == max
+        else:  # two points match
+            # if points one and two are identical, add a tiny bit to the
+            # control point two; if points two and three are identical,
+            # subtract a tiny bit from point two.
+            bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
+            ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
+
+    return ctrl_pts, colormap
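
As a self-contained sketch of the 'auto' branch above, the three control
points are simply percentiles of the absolute data:

    import numpy as np
    data = np.random.RandomState(0).randn(10000)
    ctrl_pts = np.percentile(np.abs(data), [96, 97.5, 99.95])
    print(ctrl_pts)  # three increasing values used as colormap bounds
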
 
 
 def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
-                          colormap='hot', time_label='time=%0.2f ms',
-                          smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
-                          transparent=True, alpha=1.0, time_viewer=False,
-                          config_opts={}, subjects_dir=None, figure=None,
-                          views='lat', colorbar=True):
+                          colormap='auto', time_label='time=%0.2f ms',
+                          smoothing_steps=10, transparent=None, alpha=1.0,
+                          time_viewer=False, config_opts=None,
+                          subjects_dir=None, figure=None, views='lat',
+                          colorbar=True, clim='auto'):
     """Plot SourceEstimates with PySurfer
 
     Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
@@ -359,22 +523,19 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
     surface : str
         The type of surface (inflated, white etc.).
     hemi : str, 'lh' | 'rh' | 'split' | 'both'
-        The hemisphere to display. Using 'both' or 'split' requires
-        PySurfer version 0.4 or above.
-    colormap : str
-        The type of colormap to use.
+        The hemisphere to display.
+    colormap : str | np.ndarray of float, shape (n_colors, 3 | 4)
+        Name of colormap to use or a custom look up table. If array, must
+        be an (n x 3) or (n x 4) array with RGB or RGBA values between
+        0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
+        based on whether 'lims' or 'pos_lims' are specified in `clim`.
     time_label : str
         How to print info about the time instant visualized.
     smoothing_steps : int
         The amount of smoothing.
-    fmin : float
-        The minimum value to display.
-    fmid : float
-        The middle value on the colormap.
-    fmax : float
-        The maximum value for the colormap.
-    transparent : bool
+    transparent : bool | None
         If True, use a linear transparency between fmin and fmid.
+        None will choose automatically based on colormap type.
     alpha : float
         Alpha value to apply globally to the overlay.
     time_viewer : bool
@@ -394,26 +555,32 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
         View to use. See surfer.Brain().
     colorbar : bool
         If True, display colorbar on scene.
+    clim : str | dict
+        Colorbar properties specification. If 'auto', set clim automatically
+        based on data percentiles. If dict, should contain:
+
+            ``kind`` : str
+                Flag to specify type of limits. 'value' or 'percent'.
+            ``lims`` : list | np.ndarray | tuple of float, 3 elements
+                Note: Only use this if 'colormap' is not 'mne'.
+                Lower, middle, and upper bounds for the colormap.
+            ``pos_lims`` : list | np.ndarray | tuple of float, 3 elements
+                Note: Only use this if 'colormap' is 'mne'.
+                Lower, middle, and upper bounds for the colormap. Positive
+                values will be mirrored directly across zero during colormap
+                construction to obtain negative control points.
+
 
     Returns
     -------
     brain : Brain
         An instance of surfer.viz.Brain from PySurfer.
     """
-    import surfer
     from surfer import Brain, TimeViewer
+    config_opts = _handle_default('config_opts', config_opts)
 
-    if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
-        raise NotImplementedError('hemi type "%s" not supported with your '
-                                  'version of pysurfer. Please upgrade to '
-                                  'version 0.4 or higher.' % hemi)
-
-    try:
-        import mayavi
-        from mayavi import mlab
-    except ImportError:
-        from enthought import mayavi
-        from enthought.mayavi import mlab
+    import mayavi
+    from mayavi import mlab
 
     # import here to avoid circular import problem
     from ..source_estimate import SourceEstimate
@@ -434,7 +601,7 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
         # make sure it is of the correct type
         if not isinstance(figure, list):
             figure = [figure]
-        if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
+        if not all(isinstance(f, mayavi.core.scene.Scene) for f in figure):
             raise TypeError('figure must be a mayavi scene or list of scenes')
         # make sure we have the right number of figures
         n_fig = len(figure)
@@ -443,15 +610,22 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                                'number of elements as PySurfer plots that '
                                'will be created (%s)' % (n_split * n_views))
 
-    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
+    # convert control points to locations in colormap
+    ctrl_pts, colormap = _limits_to_control_points(clim, stc.data, colormap)
 
-    subject = _check_subject(stc.subject, subject, False)
-    if subject is None:
-        if 'SUBJECT' in os.environ:
-            subject = os.environ['SUBJECT']
-        else:
-            raise ValueError('SUBJECT environment variable not set')
+    # Construct cmap manually if 'mne' and get cmap bounds
+    # and triage transparent argument
+    if colormap in ('mne', 'mne_analyze'):
+        colormap = mne_analyze_colormap(ctrl_pts)
+        scale_pts = [-1 * ctrl_pts[-1], 0, ctrl_pts[-1]]
+        transparent = False if transparent is None else transparent
+    else:
+        scale_pts = ctrl_pts
+        transparent = True if transparent is None else transparent
 
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
+                                    raise_error=True)
+    subject = _check_subject(stc.subject, subject, True)
     if hemi in ['both', 'split']:
         hemis = ['lh', 'rh']
     else:
@@ -463,18 +637,15 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                   subjects_dir=subjects_dir)
     if 'views' in args:
         kwargs['views'] = views
-    else:
-        logger.info('PySurfer does not support "views" argument, please '
-                    'consider updating to a newer version (0.4 or later)')
     with warnings.catch_warnings(record=True):  # traits warnings
         brain = Brain(subject, hemi, surface, **kwargs)
     for hemi in hemis:
         hemi_idx = 0 if hemi == 'lh' else 1
         if hemi_idx == 0:
-            data = stc.data[:len(stc.vertno[0])]
+            data = stc.data[:len(stc.vertices[0])]
         else:
-            data = stc.data[len(stc.vertno[0]):]
-        vertices = stc.vertno[hemi_idx]
+            data = stc.data[len(stc.vertices[0]):]
+        vertices = stc.vertices[hemi_idx]
         time = 1e3 * stc.times
         with warnings.catch_warnings(record=True):  # traits warnings
             brain.add_data(data, colormap=colormap, vertices=vertices,
@@ -483,8 +654,8 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
                            colorbar=colorbar)
 
         # scale colormap and set time (index) to display
-        brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
-                                  transparent=transparent)
+        brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
+                                  fmax=scale_pts[2], transparent=transparent)
 
     if time_viewer:
         TimeViewer(brain)
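
A hedged sketch of the new ``clim`` argument (``stc`` is assumed to be a
loaded SourceEstimate; the values are illustrative):

    import mne
    # explicit control points with the one-tailed 'hot' colormap
    brain = mne.viz.plot_source_estimates(
        stc, subject='sample', hemi='split',
        clim=dict(kind='value', lims=[3., 6., 9.]))
    # for signed data, the 'mne' colormap mirrors around zero:
    # clim=dict(kind='percent', pos_lims=[96, 97.5, 99.95])
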
@@ -496,8 +667,8 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
                                  opacity=0.2, brain_color=(0.7,) * 3,
                                  show=True, high_resolution=False,
                                  fig_name=None, fig_number=None, labels=None,
-                                 modes=['cone', 'sphere'],
-                                 scale_factors=[1, 0.6],
+                                 modes=('cone', 'sphere'),
+                                 scale_factors=(1, 0.6),
                                  verbose=None, **kwargs):
     """Plot source estimates obtained with sparse solver
 
@@ -525,6 +696,8 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
         Brain color.
     show : bool
         Show figures if True.
+    high_resolution : bool
+        If True, plot on the original (non-downsampled) cortical mesh.
     fig_name :
         Mayavi figure name.
     fig_number :
@@ -534,11 +707,21 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
         label and the waveforms within each cluster are presented in
         the same color. labels should be a list of ndarrays when
         stcs is a list, i.e., one label for each stc.
+    modes : list
+        Should be a list, with each entry being ``'cone'`` or ``'sphere'``
+        to specify how the dipoles should be shown.
+    scale_factors : list
+        List of floating point scale factors for the markers.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
-    kwargs : kwargs
+    **kwargs : kwargs
         Keyword arguments to pass to mlab.triangular_mesh.
     """
+    known_modes = ['cone', 'sphere']
+    if not isinstance(modes, (list, tuple)) or \
+            not all(mode in known_modes for mode in modes):
+        raise ValueError('modes must be a list containing only '
+                         '"cone" or "sphere"')
     if not isinstance(stcs, list):
         stcs = [stcs]
     if labels is not None and not isinstance(labels, list):
@@ -573,11 +756,7 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
                for stc in stcs]
     unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
 
-    try:
-        from mayavi import mlab
-    except ImportError:
-        from enthought.mayavi import mlab
-
+    from mayavi import mlab
     from matplotlib.colors import ColorConverter
     color_converter = ColorConverter()
 
@@ -618,8 +797,8 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
         mode = modes[1] if is_common else modes[0]
         scale_factor = scale_factors[1] if is_common else scale_factors[0]
 
-        if (isinstance(scale_factor, (np.ndarray, list, tuple))
-                and len(unique_vertnos) == len(scale_factor)):
+        if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
+                len(unique_vertnos) == len(scale_factor)):
             scale_factor = scale_factor[idx]
 
         x, y, z = points[v]
@@ -633,8 +812,8 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
             mask = (vertno == v)
             assert np.sum(mask) == 1
             linestyle = linestyles[k]
-            plt.plot(1e3 * stc.times, 1e9 * stcs[k].data[mask].ravel(), c=c,
-                     linewidth=linewidth, linestyle=linestyle)
+            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
+                     c=c, linewidth=linewidth, linestyle=linestyle)
 
     plt.xlabel('Time (ms)', fontsize=18)
     plt.ylabel('Source amplitude (nAm)', fontsize=18)
@@ -649,3 +828,98 @@ def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
     surface.actor.property.shading = True
 
     return surface
+
+
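
For the function above, a minimal sketch of the now tuple-valued
``modes``/``scale_factors`` arguments (``src`` and ``stcs`` are assumed to
come from a sparse solver such as MxNE):

    from mne.viz import plot_sparse_source_estimates
    surface = plot_sparse_source_estimates(
        src, stcs, modes=('cone', 'sphere'), scale_factors=(1, 0.6),
        fig_name='Sparse sources', opacity=0.1)
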
+def plot_dipole_locations(dipoles, trans, subject, subjects_dir=None,
+                          bgcolor=(1, 1, 1), opacity=0.3,
+                          brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
+                          fig_name=None, fig_size=(600, 600), mode='cone',
+                          scale_factor=0.1e-1, colors=None, verbose=None):
+    """Plot dipole locations
+
+    Only the location of the first time point of each dipole is shown.
+
+    Parameters
+    ----------
+    dipoles : list of instances of Dipole | Dipole
+        The dipoles to plot.
+    trans : dict
+        The MRI-to-head transformation dict (e.g., from read_trans).
+    subject : str
+        The subject name corresponding to FreeSurfer environment
+        variable SUBJECT.
+    subjects_dir : None | str
+        The path to the freesurfer subjects reconstructions.
+        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
+        The default is None.
+    bgcolor : tuple of length 3
+        Background color in 3D.
+    opacity : float in [0, 1]
+        Opacity of brain mesh.
+    brain_color : tuple of length 3
+        Brain color.
+    mesh_color : tuple of length 3
+        Mesh color.
+    fig_name : str
+        Mayavi figure name.
+    fig_size : tuple of length 2
+        Mayavi figure size.
+    mode : str
+        Should be ``'cone'`` or ``'sphere'`` to specify how the
+        dipoles should be shown.
+    scale_factor : float
+        The scaling applied to amplitudes for the plot.
+    colors : list of colors | None
+        Color to plot with each dipole. If None, default colors are used.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of mlab.Figure
+        The mayavi figure.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    from mayavi import mlab
+    from matplotlib.colors import ColorConverter
+    color_converter = ColorConverter()
+
+    trans = _get_mri_head_t(trans)[0]
+    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
+                                    raise_error=True)
+    fname = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
+    points, faces = read_surface(fname)
+    points = apply_trans(trans['trans'], points * 1e-3)
+
+    from .. import Dipole
+    if isinstance(dipoles, Dipole):
+        dipoles = [dipoles]
+
+    if mode not in ['cone', 'sphere']:
+        raise ValueError('mode must be "cone" or "sphere"')
+
+    if colors is None:
+        colors = cycle(COLORS)
+
+    fig = mlab.figure(size=fig_size, bgcolor=bgcolor, fgcolor=(0, 0, 0))
+    with warnings.catch_warnings(record=True):  # FutureWarning in traits
+        mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2],
+                             faces, color=mesh_color, opacity=opacity)
+
+    for dip, color in zip(dipoles, colors):
+        rgb_color = color_converter.to_rgb(color)
+        with warnings.catch_warnings(record=True):  # FutureWarning in traits
+            mlab.quiver3d(dip.pos[0, 0], dip.pos[0, 1], dip.pos[0, 2],
+                          dip.ori[0, 0], dip.ori[0, 1], dip.ori[0, 2],
+                          opacity=1., mode=mode, color=rgb_color,
+                          scalars=dip.amplitude.max(),
+                          scale_factor=scale_factor)
+    if fig_name is not None:
+        mlab.title(fig_name)
+    if fig.scene is not None:  # safe for Travis
+        fig.scene.x_plus_view()
+
+    return fig
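
A usage sketch for the new function (file names are illustrative):

    import mne
    trans = mne.read_trans('sample-trans.fif')  # hypothetical file
    dip = mne.read_dipole('sample_audvis.dip')  # hypothetical file
    fig = mne.viz.plot_dipole_locations(dip, trans, subject='sample',
                                        mode='sphere')
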
diff --git a/mne/viz/__init__.py b/mne/viz/__init__.py
index 738ff9b..cc3f0bf 100644
--- a/mne/viz/__init__.py
+++ b/mne/viz/__init__.py
@@ -1,20 +1,24 @@
 """Visualization routines
 """
 
-from .topomap import plot_evoked_topomap, plot_projs_topomap
-from .topomap import plot_ica_components, plot_ica_topomap
-from .topomap import plot_tfr_topomap, plot_topomap
-from .topo import (plot_topo, plot_topo_tfr, plot_topo_image_epochs,
+from .topomap import (plot_evoked_topomap, plot_projs_topomap,
+                      plot_ica_components, plot_tfr_topomap, plot_topomap,
+                      plot_epochs_psd_topomap)
+from .topo import (plot_topo, plot_topo_image_epochs,
                    iter_topography)
-from .utils import tight_layout, mne_analyze_colormap, compare_fiff
-from ._3d import plot_sparse_source_estimates, plot_source_estimates
-from ._3d import plot_trans, plot_evoked_field
-from .misc import plot_cov, plot_bem, plot_events
-from .misc import plot_source_spectrogram
-from .utils import _mutable_defaults
-from .evoked import plot_evoked, plot_evoked_image
+from .utils import (tight_layout, mne_analyze_colormap, compare_fiff,
+                    ClickableImage, add_background_image)
+from ._3d import (plot_sparse_source_estimates, plot_source_estimates,
+                  plot_trans, plot_evoked_field, plot_dipole_locations)
+from .misc import (plot_cov, plot_bem, plot_events, plot_source_spectrogram,
+                   _get_presser, plot_dipole_amplitudes)
+from .evoked import (plot_evoked, plot_evoked_image, plot_evoked_white,
+                     plot_snr_estimate, plot_evoked_topo)
 from .circle import plot_connectivity_circle, circular_layout
-from .epochs import plot_image_epochs, plot_drop_log, plot_epochs
-from .epochs import _drop_log_stats
-from .raw import plot_raw, plot_raw_psds
+from .epochs import (plot_image_epochs, plot_drop_log, plot_epochs,
+                     _drop_log_stats, plot_epochs_psd, plot_epochs_image)
+from .raw import plot_raw, plot_raw_psd
 from .ica import plot_ica_scores, plot_ica_sources, plot_ica_overlay
+from .ica import _plot_sources_raw, _plot_sources_epochs
+from .montage import plot_montage
+from .decoding import plot_gat_matrix, plot_gat_times
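
Note the renames here: ``plot_raw_psds`` becomes ``plot_raw_psd`` and
``plot_image_epochs`` is superseded by ``plot_epochs_image`` (a deprecated
shim remains, see the epochs.py hunk below). In user code the new entry
points are picked up from the package namespace, e.g.:

    from mne.viz import (plot_dipole_locations, plot_epochs_image,
                         plot_raw_psd, plot_gat_matrix)
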
diff --git a/mne/viz/circle.py b/mne/viz/circle.py
index b2f8dab..7662b14 100644
--- a/mne/viz/circle.py
+++ b/mne/viz/circle.py
@@ -92,7 +92,8 @@ def circular_layout(node_names, node_order, start_pos=90, start_between=True,
 
 
 def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
-                                     n_nodes=0, node_angles=None, ylim=[9, 10]):
+                                     n_nodes=0, node_angles=None,
+                                     ylim=[9, 10]):
     """Isolates connections around a single node when user left clicks a node.
 
     On right click, resets all connections."""
@@ -114,7 +115,7 @@ def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
         fig.canvas.draw()
     elif event.button == 3:  # right click
         patches = event.inaxes.patches
-        for ii in xrange(np.size(indices, axis=1)):
+        for ii in range(np.size(indices, axis=1)):
             patches[ii].set_visible(True)
         fig.canvas.draw()
 
@@ -128,11 +129,12 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
                              colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
                              fontsize_title=12, fontsize_names=8,
                              fontsize_colorbar=8, padding=6.,
-                             fig=None, subplot=111, interactive=True):
+                             fig=None, subplot=111, interactive=True,
+                             node_linewidth=2., show=True):
     """Visualize connectivity as a circular graph.
 
     Note: This code is based on the circle graph example by Nicolas P. Rougier
-    http://www.loria.fr/~rougier/coding/recipes.html
+    http://www.labri.fr/perso/nrougier/coding/.
 
     Parameters
     ----------
@@ -198,6 +200,10 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
     interactive : bool
         When enabled, left-click on a node to show only connections to that
         node. Right-click shows all connections.
+    node_linewidth : float
+        Line width for nodes.
+    show : bool
+        Show figure if True.
 
     Returns
     -------
@@ -324,10 +330,10 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
         nodes_n_con_seen[start] += 1
         nodes_n_con_seen[end] += 1
 
-        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start])
-                           / float(nodes_n_con[start]))
-        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end])
-                         / float(nodes_n_con[end]))
+        start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
+                           float(nodes_n_con[start]))
+        end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
+                         float(nodes_n_con[end]))
 
     # scale connectivity for colormap (vmin<=>0, vmax<=>1)
     con_val_scaled = (con - vmin) / vrange
@@ -359,8 +365,8 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
     # Draw ring with colored nodes
     height = np.ones(n_nodes) * 1.0
     bars = axes.bar(node_angles, height, width=node_width, bottom=9,
-                    edgecolor=node_edgecolor, lw=2, facecolor='.9',
-                    align='center')
+                    edgecolor=node_edgecolor, lw=node_linewidth,
+                    facecolor='.9', align='center')
 
     for bar, color in zip(bars, node_colors):
         bar.set_facecolor(color)
@@ -395,7 +401,7 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
         cb.ax.tick_params(labelsize=fontsize_colorbar)
         plt.setp(cb_yticks, color=textcolor)
 
-    #Add callback for interaction
+    # Add callback for interaction
     if interactive:
         callback = partial(_plot_connectivity_circle_onpick, fig=fig,
                            axes=axes, indices=indices, n_nodes=n_nodes,
@@ -403,6 +409,6 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
 
         fig.canvas.mpl_connect('button_press_event', callback)
 
+    if show:
+        plt.show()
     return fig, axes
-
-
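
A small self-contained sketch of the updated call, including the new
``node_linewidth`` and ``show`` arguments (toy data):

    import numpy as np
    from mne.viz import plot_connectivity_circle

    con = np.random.RandomState(42).rand(4, 4)  # toy connectivity matrix
    names = ['LF', 'RF', 'LT', 'RT']
    fig, axes = plot_connectivity_circle(con, names, n_lines=6,
                                         node_linewidth=1., show=False)
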
diff --git a/mne/viz/decoding.py b/mne/viz/decoding.py
new file mode 100644
index 0000000..9d88f15
--- /dev/null
+++ b/mne/viz/decoding.py
@@ -0,0 +1,236 @@
+"""Functions to plot decoding results
+"""
+from __future__ import print_function
+
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Clement Moutard <clement.moutard at gmail.com>
+#          Jean-Remi King <jeanremi.king at gmail.com>
+#
+# License: Simplified BSD
+
+import numpy as np
+import warnings
+
+
+def plot_gat_matrix(gat, title=None, vmin=None, vmax=None, tlim=None,
+                    ax=None, cmap='RdBu_r', show=True, colorbar=True,
+                    xlabel=True, ylabel=True):
+    """Plotting function of GeneralizationAcrossTime object
+
+    Plot the scores of a fitted and scored GeneralizationAcrossTime
+    object as a matrix of training time (y-axis) by testing time
+    (x-axis).
+
+    Parameters
+    ----------
+    gat : instance of mne.decoding.GeneralizationAcrossTime
+        The gat object.
+    title : str | None
+        Figure title. Defaults to None.
+    vmin : float | None
+        Min color value for scores. If None, sets to min(gat.scores_).
+        Defaults to None.
+    vmax : float | None
+        Max color value for scores. If None, sets to max(gat.scores_).
+        Defaults to None.
+    tlim : array-like, (4,) | None
+        The temporal boundaries. If None, expands to
+        [tmin_train, tmax_train, tmin_test, tmax_test]. Defaults to None.
+    ax : object | None
+        Plot pointer. If None, generate new figure. Defaults to None.
+    cmap : str | cmap object
+        The color map to be used. Defaults to 'RdBu_r'.
+    show : bool
+        If True, the figure will be shown. Defaults to True.
+    colorbar : bool
+        If True, the colorbar of the figure is displayed. Defaults to True.
+    xlabel : bool
+        If True, the xlabel is displayed. Defaults to True.
+    ylabel : bool
+        If True, the ylabel is displayed. Defaults to True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
+    """
+    if not hasattr(gat, 'scores_'):
+        raise RuntimeError('Please score your data before trying to plot '
+                           'scores')
+    import matplotlib.pyplot as plt
+    if ax is None:
+        fig, ax = plt.subplots(1, 1)
+
+    # Define time limits
+    if tlim is None:
+        tt_times = gat.train_times_['times']
+        tn_times = gat.test_times_['times']
+        tlim = [tn_times[0][0], tn_times[-1][-1], tt_times[0], tt_times[-1]]
+
+    # Plot scores
+    im = ax.imshow(gat.scores_, interpolation='nearest', origin='lower',
+                   extent=tlim, vmin=vmin, vmax=vmax, cmap=cmap)
+    if xlabel is True:
+        ax.set_xlabel('Testing Time (s)')
+    if ylabel is True:
+        ax.set_ylabel('Training Time (s)')
+    if title is not None:
+        ax.set_title(title)
+    ax.axvline(0, color='k')
+    ax.axhline(0, color='k')
+    ax.set_xlim(tlim[:2])
+    ax.set_ylim(tlim[2:])
+    if colorbar is True:
+        plt.colorbar(im, ax=ax)
+    if show is True:
+        plt.show()
+    return fig if ax is None else ax.get_figure()
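
A hedged end-to-end sketch (``epochs`` is assumed to be an existing
mne.Epochs instance with two event types):

    from mne.decoding import GeneralizationAcrossTime
    from mne.viz import plot_gat_matrix

    gat = GeneralizationAcrossTime(n_jobs=1)
    gat.fit(epochs)    # train one classifier per training time sample
    gat.score(epochs)  # fill in gat.scores_
    fig = plot_gat_matrix(gat, vmin=0., vmax=1., title='GAT scores')
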
+
+
+def plot_gat_times(gat, train_time='diagonal', title=None, xmin=None,
+                   xmax=None, ymin=None, ymax=None, ax=None, show=True,
+                   color=None, xlabel=True, ylabel=True, legend=True,
+                   chance=True, label='Classif. score'):
+    """Plotting function of GeneralizationAcrossTime object
+
+    Plot the scores of the classifier trained at 'train_time'.
+
+    Parameters
+    ----------
+    gat : instance of mne.decoding.GeneralizationAcrossTime
+        The gat object.
+    train_time : 'diagonal' | float | list or array of float
+        Plot a 1d array of a portion of gat.scores_.
+        If set to 'diagonal', plots the gat.scores_ of classifiers
+        trained and tested at identical times. If set to a float, or to
+        a list or array of floats, plots the scores of the classifier(s)
+        trained at the given training time(s).
+        Defaults to 'diagonal'.
+    title : str | None
+        Figure title. Defaults to None.
+    xmin : float | None, optional
+        Min time value. Defaults to None.
+    xmax : float | None, optional
+        Max time value. Defaults to None.
+    ymin : float | None, optional
+        Min score value. If None, sets to min(scores). Defaults to None.
+    ymax : float | None, optional
+        Max score value. If None, sets to max(scores). Defaults to None.
+    ax : object | None
+        Plot pointer. If None, generate new figure. Defaults to None.
+    show : bool, optional
+        If True, the figure will be shown. Defaults to True.
+    color : str | None
+        Score line color. If None, the matplotlib default is used.
+        Defaults to None.
+    xlabel : bool
+        If True, the xlabel is displayed. Defaults to True.
+    ylabel : bool
+        If True, the ylabel is displayed. Defaults to True.
+    legend : bool
+        If True, a legend is displayed. Defaults to True.
+    chance : bool | float
+        Plot chance level. If True, chance level is estimated from the type
+        of scorer. Defaults to True.
+    label : str
+        Score label used in the legend. Defaults to 'Classif. score'.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure.
+    """
+    if not hasattr(gat, 'scores_'):
+        raise RuntimeError('Please score your data before trying to plot '
+                           'scores')
+    import matplotlib.pyplot as plt
+    if ax is None:
+        fig, ax = plt.subplots(1, 1)
+
+    # Find and plot chance level
+    if chance is not False:
+        if chance is True:
+            chance = _get_chance_level(gat.scorer_, gat.y_train_)
+        ax.axhline(float(chance), color='k', linestyle='--',
+                   label="Chance level")
+    ax.axvline(0, color='k', label='')
+
+    if isinstance(train_time, (str, float)):
+        train_time = [train_time]
+        label = [label]
+    elif isinstance(train_time, (list, np.ndarray)):
+        label = train_time
+    else:
+        raise ValueError("train_time must be 'diagonal' | float | list or "
+                         "array of float.")
+
+    if color is None or isinstance(color, str):
+        color = np.tile(color, len(train_time))
+
+    for _train_time, _color, _label in zip(train_time, color, label):
+        _plot_gat_time(gat, _train_time, ax, _color, _label)
+
+    if title is not None:
+        ax.set_title(title)
+    if ymin is not None and ymax is not None:
+        ax.set_ylim(ymin, ymax)
+    if xmin is not None and xmax is not None:
+        ax.set_xlim(xmin, xmax)
+    if xlabel is True:
+        ax.set_xlabel('Time (s)')
+    if ylabel is True:
+        ax.set_ylabel('Classif. score ({0})'.format(
+                      'AUC' if 'roc' in repr(gat.scorer_) else r'%'))
+    if legend is True:
+        ax.legend(loc='best')
+    if show is True:
+        plt.show()
+    return fig if ax is None else ax.get_figure()
+
+
+def _plot_gat_time(gat, train_time, ax, color, label):
+    """Aux function of plot_gat_time
+
+    Plots a unique score 1d array"""
+    # Detect whether gat is a full matrix or just its diagonal
+    if np.all(np.unique([len(t) for t in gat.test_times_['times']]) == 1):
+        scores = gat.scores_
+    elif train_time == 'diagonal':
+        # Get scores from identical training and testing times even if GAT
+        # is not square.
+        scores = np.zeros(len(gat.scores_))
+        for train_idx, train_time in enumerate(gat.train_times_['times']):
+            for test_times in gat.test_times_['times']:
+                # find closest testing time from train_time
+                lag = test_times - train_time
+                test_idx = np.abs(lag).argmin()
+                # check that not more than 1 classifier away
+                if np.abs(lag[test_idx]) > gat.train_times_['step']:
+                    score = np.nan
+                else:
+                    score = gat.scores_[train_idx][test_idx]
+                scores[train_idx] = score
+    elif isinstance(train_time, float):
+        train_times = gat.train_times_['times']
+        idx = np.abs(train_times - train_time).argmin()
+        if np.abs(train_times[idx] - train_time) > gat.train_times_['step']:
+            raise ValueError("No classifier trained at %s " % train_time)
+        scores = gat.scores_[idx]
+    else:
+        raise ValueError("train_time must be 'diagonal' or a float.")
+    kwargs = dict()
+    if color is not None:
+        kwargs['color'] = color
+    ax.plot(gat.train_times_['times'], scores, label=str(label), **kwargs)
+
+
+def _get_chance_level(scorer, y_train):
+    # XXX JRK This should probably be solved within sklearn?
+    if scorer.__name__ == 'accuracy_score':
+        chance = np.max([np.mean(y_train == c) for c in np.unique(y_train)])
+    elif scorer.__name__ == 'roc_auc_score':
+        chance = 0.5
+    else:
+        chance = np.nan
+        warnings.warn('Cannot find chance level from %s, specify chance'
+                      ' level' % scorer.__name__)
+    return chance
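
Continuing the sketch above, the diagonal (time-by-time) view with the
estimated chance level:

    from mne.viz import plot_gat_times
    fig = plot_gat_times(gat, train_time='diagonal', chance=True,
                         title='Time-by-time decoding')
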
diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py
index 11c660f..4e4e830 100644
--- a/mne/viz/epochs.py
+++ b/mne/viz/epochs.py
@@ -1,31 +1,34 @@
 """Functions to plot epochs data
 """
-from __future__ import print_function
 
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Denis Engemann <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
+#          Jaakko Leppakangas <jaeilepp at student.jyu.fi>
 #
 # License: Simplified BSD
 
-import warnings
-from collections import deque
 from functools import partial
+import copy
 
 import numpy as np
-from scipy import ndimage
 
-from ..utils import create_chunks
+from ..utils import verbose, get_config, set_config, deprecated
+from ..utils import logger
 from ..io.pick import pick_types, channel_type
-from ..fixes import Counter
-from .utils import _mutable_defaults, tight_layout, _prepare_trellis
-from .utils import figure_nobar
+from ..io.proj import setup_proj
+from ..fixes import Counter, _in1d
+from ..time_frequency import compute_epochs_psd
+from .utils import tight_layout, figure_nobar, _toggle_proj
+from .utils import _toggle_options, _layout_figure, _setup_vmin_vmax
+from .utils import _channels_changed, _plot_raw_onscroll, _onclick_help
+from ..defaults import _handle_default
 
 
-def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
+def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
                       vmax=None, colorbar=True, order=None, show=True,
-                      units=None, scalings=None, cmap='RdBu_r'):
+                      units=None, scalings=None, cmap='RdBu_r', fig=None):
     """Plot Event Related Potential / Fields image
 
     Parameters
@@ -37,7 +40,7 @@ def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
         data channels are plotted.
     sigma : float
         The standard deviation of the Gaussian smoothing to apply along
-        the epoch axis to apply in the image.
+        the epoch axis of the image. If 0., no smoothing is applied.
     vmin : float
         The min value in the image. The unit is uV for EEG channels,
         fT for magnetometers and fT/cm for gradiometers
@@ -53,47 +56,57 @@ def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
         passed are the times vector and the data as 2d array
         (data.shape[1] == len(times)).
     show : bool
-        Show or not the figure at the end
+        Show figure if True.
     units : dict | None
         The units of the channel types used for axes labels. If None,
         defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
     scalings : dict | None
         The scalings of the channel types to be applied for plotting.
-        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`
+        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
+        eog=1e6)`
     cmap : matplotlib colormap
         Colormap.
+    fig : matplotlib figure | None
+        Figure instance to draw the image to. Figure must contain two axes for
+        drawing the single trials and evoked responses. If None a new figure is
+        created. Defaults to None.
 
     Returns
     -------
     figs : list of matplotlib figures
         One figure per channel displayed.
     """
-    units, scalings = _mutable_defaults(('units', units),
-                                        ('scalings', scalings))
+    from scipy import ndimage
+    units = _handle_default('units', units)
+    scalings = _handle_default('scalings', scalings)
 
     import matplotlib.pyplot as plt
     if picks is None:
         picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                            exclude='bads')
 
-    if list(units.keys()) != list(scalings.keys()):
+    if set(units.keys()) != set(scalings.keys()):
         raise ValueError('Scalings and units must have the same keys.')
 
     picks = np.atleast_1d(picks)
+    if fig is not None and len(picks) > 1:
+        raise ValueError('Only single pick can be drawn to a figure.')
     evoked = epochs.average(picks)
     data = epochs.get_data()[:, picks, :]
-    if vmin is None:
-        vmin = data.min()
-    if vmax is None:
-        vmax = data.max()
+    scale_vmin = vmin is None
+    scale_vmax = vmax is None
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
 
     figs = list()
     for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
-        this_fig = plt.figure()
+        if fig is None:
+            this_fig = plt.figure()
+        else:
+            this_fig = fig
         figs.append(this_fig)
 
         ch_type = channel_type(epochs.info, idx)
-        if not ch_type in scalings:
+        if ch_type not in scalings:
             # We know it's not in either scalings or units since keys match
             raise KeyError('%s type not in scalings and units' % ch_type)
         this_data *= scalings[ch_type]
@@ -105,13 +118,19 @@ def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
         if this_order is not None:
             this_data = this_data[this_order]
 
-        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
-
+        if sigma > 0.:
+            this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
+                                                  axis=0)
+        plt.figure(this_fig.number)
         ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
-        im = plt.imshow(this_data,
+        if scale_vmin:
+            vmin *= scalings[ch_type]
+        if scale_vmax:
+            vmax *= scalings[ch_type]
+        im = ax1.imshow(this_data,
                         extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
                                 0, len(data)],
-                        aspect='auto', origin='lower',
+                        aspect='auto', origin='lower', interpolation='nearest',
                         vmin=vmin, vmax=vmax, cmap=cmap)
         ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
         if colorbar:
@@ -121,10 +140,17 @@ def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
         ax1.axis('auto')
         ax1.axis('tight')
         ax1.axvline(0, color='m', linewidth=3, linestyle='--')
-        ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
+        evoked_data = scalings[ch_type] * evoked.data[i]
+        ax2.plot(1e3 * evoked.times, evoked_data)
         ax2.set_xlabel('Time (ms)')
+        ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
         ax2.set_ylabel(units[ch_type])
-        ax2.set_ylim([vmin, vmax])
+        evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
+        evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
+        if scale_vmin or scale_vmax:
+            evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
+            evoked_vmin = -evoked_vmax
+        ax2.set_ylim([evoked_vmin, evoked_vmax])
         ax2.axvline(0, color='m', linewidth=3, linestyle='--')
         if colorbar:
             plt.colorbar(im, cax=ax3)
@@ -136,6 +162,18 @@ def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
     return figs
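
A short usage sketch for the renamed function (``epochs`` is assumed to be
an existing mne.Epochs instance):

    from mne.viz import plot_epochs_image
    figs = plot_epochs_image(epochs, picks=[0], sigma=1., cmap='RdBu_r',
                             show=False)
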
 
 
+ at deprecated('`plot_image_epochs` is deprecated and will be removed in '
+            'MNE 0.11. Please use `plot_epochs_image` instead.')
+def plot_image_epochs(epochs, picks=None, sigma=0., vmin=None,
+                      vmax=None, colorbar=True, order=None, show=True,
+                      units=None, scalings=None, cmap='RdBu_r', fig=None):
+    # forward all arguments unchanged to the renamed function
+    return plot_epochs_image(epochs=epochs, picks=picks, sigma=sigma,
+                             vmin=vmin, vmax=vmax, colorbar=colorbar,
+                             order=order, show=show, units=units,
+                             scalings=scalings, cmap=cmap, fig=fig)
+
+
 def _drop_log_stats(drop_log, ignore=['IGNORED']):
     """
     Parameters
@@ -157,14 +195,14 @@ def _drop_log_stats(drop_log, ignore=['IGNORED']):
         raise ValueError('drop_log must be a list of lists')
 
     perc = 100 * np.mean([len(d) > 0 for d in drop_log
-                          if not any([r in ignore for r in d])])
+                          if not any(r in ignore for r in d)])
 
     return perc
 
 
 def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
                   color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
-                  show=True, return_fig=False):
+                  show=True):
     """Show the channel stats based on a drop_log from Epochs
 
     Parameters
@@ -186,14 +224,9 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
         The drop reasons to ignore.
     show : bool
         Show figure if True.
-    return_fig : bool
-        Return only figure handle if True. This argument will default
-        to True in v0.9 and then be removed.
 
     Returns
     -------
-    perc : float
-        Total percentage of epochs dropped.
     fig : Instance of matplotlib.figure.Figure
         The figure.
     """
@@ -201,12 +234,13 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
     perc = _drop_log_stats(drop_log, ignore)
     scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
     ch_names = np.array(list(scores.keys()))
+    fig = plt.figure()
     if perc < threshold or len(ch_names) == 0:
-        return perc
+        plt.text(0, 0, 'No drops')
+        return fig
     counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
     n_plot = min(n_max_plot, len(ch_names))
     order = np.flipud(np.argsort(counts))
-    fig = plt.figure()
     plt.title('%s: %0.1f%%' % (subject, perc))
     x = np.arange(n_plot)
     plt.bar(x, counts[order[:n_plot]], color=color, width=width)
@@ -220,13 +254,7 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
     if show:
         plt.show()
 
-    if return_fig:
-        return fig
-    else:
-        msg = ("'return_fig=False' will be deprecated in v0.9. "
-               "Use 'Epochs.drop_log_stats' to get percentages instead.")
-        warnings.warn(msg, DeprecationWarning)
-        return perc, fig
+    return fig
 
 
 def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
@@ -234,19 +262,21 @@ def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
     """Aux functioin"""
     this = axes_handler[0]
     for ii, data_, ax in zip(epoch_idx, data, axes):
-        [l.set_data(times, d) for l, d in zip(ax.lines, data_[good_ch_idx])]
+        for l, d in zip(ax.lines, data_[good_ch_idx]):
+            l.set_data(times, d)
         if bad_ch_idx is not None:
             bad_lines = [ax.lines[k] for k in bad_ch_idx]
-            [l.set_data(times, d) for l, d in zip(bad_lines,
-                                                  data_[bad_ch_idx])]
+            for l, d in zip(bad_lines, data_[bad_ch_idx]):
+                l.set_data(times, d)
         if title_str is not None:
             ax.set_title(title_str % ii, fontsize=12)
         ax.set_ylim(data.min(), data.max())
-        ax.set_yticks([])
-        ax.set_xticks([])
+        ax.set_yticks(list())
+        ax.set_xticks(list())
         if vars(ax)[this]['reject'] is True:
             #  memorizing reject
-            [l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
+            for l in ax.lines:
+                l.set_color((0.8, 0.8, 0.8))
             ax.get_figure().canvas.draw()
         else:
             #  forgetting previous reject
@@ -254,9 +284,11 @@ def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
                 if k == this:
                     continue
                 if vars(ax).get(k, {}).get('reject', None) is True:
-                    [l.set_color('k') for l in ax.lines[:len(good_ch_idx)]]
+                    for l in ax.lines[:len(good_ch_idx)]:
+                        l.set_color('k')
                     if bad_ch_idx is not None:
-                        [l.set_color('r') for l in ax.lines[-len(bad_ch_idx):]]
+                        for l in ax.lines[-len(bad_ch_idx):]:
+                            l.set_color('r')
                     ax.get_figure().canvas.draw()
                     break
 
@@ -284,7 +316,7 @@ def _epochs_navigation_onclick(event, params):
                           p['data'][this_idx],
                           p['times'], p['axes'], p['title_str'],
                           p['axes_handler'])
-            # XXX don't ask me why
+        # XXX don't ask me why
         p['axes'][0].get_figure().canvas.draw()
 
 
@@ -300,152 +332,1186 @@ def _epochs_axes_onclick(event, params):
         idx = here['idx']
         if idx not in p['reject_idx']:
             p['reject_idx'].append(idx)
-            [l.set_color(reject_color) for l in ax.lines]
+            for l in ax.lines:
+                l.set_color(reject_color)
             here['reject'] = True
     elif here.get('reject', None) is True:
         idx = here['idx']
         if idx in p['reject_idx']:
             p['reject_idx'].pop(p['reject_idx'].index(idx))
             good_lines = [ax.lines[k] for k in p['good_ch_idx']]
-            [l.set_color('k') for l in good_lines]
+            for l in good_lines:
+                l.set_color('k')
             if p['bad_ch_idx'] is not None:
                 bad_lines = ax.lines[-len(p['bad_ch_idx']):]
-                [l.set_color('r') for l in bad_lines]
+                for l in bad_lines:
+                    l.set_color('r')
             here['reject'] = False
     ax.get_figure().canvas.draw()
 
 
-def plot_epochs(epochs, epoch_idx=None, picks=None, scalings=None,
-                title_str='#%003i', show=True, block=False):
-    """ Visualize single trials using Trellis plot.
+def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
+                n_channels=20, title=None, show=True, block=False):
+    """ Visualize epochs
+
+    Bad epochs can be marked with a left click on top of the epoch. Bad
+    channels can be selected by clicking the channel name on the left side of
+    the main axes. Calling this function drops all the selected bad epochs as
+    well as bad epochs marked beforehand with rejection parameters.
 
     Parameters
     ----------
 
     epochs : instance of Epochs
         The epochs object
-    epoch_idx : array-like | int | None
-        The epochs to visualize. If None, the first 20 epochs are shown.
-        Defaults to None.
     picks : array-like of int | None
         Channels to be included. If None only good data channels are used.
         Defaults to None
     scalings : dict | None
-        Scale factors for the traces. If None, defaults to:
-        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
-             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
-    title_str : None | str
-        The string formatting to use for axes titles. If None, no titles
-        will be shown. Defaults expand to ``#001, #002, ...``
+        Scale factors for the traces. If None, defaults to::
+
+            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)
+
+    n_epochs : int
+        The number of epochs per view. Defaults to 20.
+    n_channels : int
+        The number of channels per view. Defaults to 20.
+    title : str | None
+        The title of the window. If None, epochs name will be displayed.
+        Defaults to None.
     show : bool
-        Whether to show the figure or not.
+        Show figure if True. Defaults to True.
     block : bool
         Whether to halt program execution until the figure is closed.
-        Useful for rejecting bad trials on the fly by clicking on a
-        sub plot.
+        Useful for rejecting bad trials on the fly by clicking on an epoch.
+        Defaults to False.
 
     Returns
     -------
     fig : Instance of matplotlib.figure.Figure
         The figure.
+
+    Notes
+    -----
+    The arrow keys (up/down/left/right) can be used to navigate between
+    channels and epochs, and the scaling can be adjusted with the - and +
+    (or =) keys, but this depends on the backend matplotlib is configured
+    to use (e.g., mpl.use(``TkAgg``) should work). Full screen mode can be
+    toggled with the f11 key. The number of epochs and channels per view
+    can be adjusted with the home/end and page down/page up keys. The
+    butterfly plot can be toggled with the ``b`` key. A right mouse click
+    adds a vertical line to the plot.
+    """
+    import matplotlib.pyplot as plt
+    scalings = _handle_default('scalings_plot_raw', scalings)
+
+    projs = epochs.info['projs']
+
+    params = {'epochs': epochs,
+              'orig_data': np.concatenate(epochs.get_data(), axis=1),
+              'info': copy.deepcopy(epochs.info),
+              'bad_color': (0.8, 0.8, 0.8),
+              't_start': 0}
+    params['label_click_fun'] = partial(_pick_bad_channels, params=params)
+    _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
+                               title, picks)
+
+    callback_close = partial(_close_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
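
A minimal interactive sketch (``epochs`` assumed loaded); with
``block=True`` the call returns only after the browser window is closed,
at which point the marked bad epochs are dropped:

    from mne.viz import plot_epochs
    fig = plot_epochs(epochs, n_epochs=10, n_channels=15, block=True)
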
+
+
+ at verbose
+def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
+                    proj=False, n_fft=256,
+                    picks=None, ax=None, color='black', area_mode='std',
+                    area_alpha=0.33, n_overlap=0,
+                    dB=True, n_jobs=1, show=True, verbose=None):
+    """Plot the power spectral density across epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object.
+    fmin : float
+        Start frequency to consider.
+    fmax : float
+        End frequency to consider.
+    tmin : float | None
+        Start time to consider.
+    tmax : float | None
+        End time to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    picks : array-like of int | None
+        List of channels to use.
+    ax : instance of matplotlib Axes | None
+        Axes to plot into. If None, axes will be created.
+    color : str | tuple
+        A matplotlib-compatible color to use.
+    area_mode : str | None
+        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
+        will be plotted. If 'range', the min and max (across channels) will be
+        plotted. Bad channels will be excluded from these calculations.
+        If None, no area will be plotted.
+    area_alpha : float
+        Alpha for the area.
+    n_overlap : int
+        The number of points of overlap between blocks.
+    dB : bool
+        If True, transform data to decibels.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    show : bool
+        Show figure if True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure with frequency spectra of the data channels.
     """
     import matplotlib.pyplot as plt
+    from .raw import _set_psd_plot_params
+    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
+        epochs.info, proj, picks, ax, area_mode)
+
+    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
+                                                ax_list)):
+        psds, freqs = compute_epochs_psd(epochs, picks=picks, fmin=fmin,
+                                         fmax=fmax, tmin=tmin, tmax=tmax,
+                                         n_fft=n_fft,
+                                         n_overlap=n_overlap, proj=proj,
+                                         n_jobs=n_jobs)
+
+        # Convert PSDs to dB
+        if dB:
+            psds = 10 * np.log10(psds)
+            unit = 'dB'
+        else:
+            unit = 'power'
+        # mean across epochs and channels
+        psd_mean = np.mean(psds, axis=0).mean(axis=0)
+        if area_mode == 'std':
+            # std across channels
+            psd_std = np.std(np.mean(psds, axis=0), axis=0)
+            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
+        elif area_mode == 'range':
+            hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
+                          np.max(np.mean(psds, axis=0), axis=0))
+        else:  # area_mode is None
+            hyp_limits = None
+
+        ax.plot(freqs, psd_mean, color=color)
+        if hyp_limits is not None:
+            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
+                            color=color, alpha=area_alpha)
+        if make_label:
+            if ii == len(picks_list) - 1:
+                ax.set_xlabel('Freq (Hz)')
+            if ii == len(picks_list) // 2:
+                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
+            ax.set_title(title)
+            ax.set_xlim(freqs[0], freqs[-1])
+    if make_label:
+        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
+    if show:
+        plt.show()
+    return fig
+
+
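With ``epochs`` as in the sketch above, the PSD can be restricted to a band
of interest (a sketch; the 2-40 Hz band, the overlap, and
``area_mode='range'`` are arbitrary illustration values)::

    from mne.viz import plot_epochs_psd

    fig = plot_epochs_psd(epochs, fmin=2., fmax=40., n_fft=256,
                          n_overlap=128, area_mode='range', dB=True)
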
+def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
+                               title, picks, order=None):
+    """Helper for setting up the mne_browse_epochs window."""
+    import matplotlib.pyplot as plt
     import matplotlib as mpl
-    scalings = _mutable_defaults(('scalings_plot_raw', None))[0]
-    if np.isscalar(epoch_idx):
-        epoch_idx = [epoch_idx]
-    if epoch_idx is None:
-        n_events = len(epochs.events)
-        epoch_idx = list(range(n_events))
-    else:
-        n_events = len(epoch_idx)
-    epoch_idx = epoch_idx[:n_events]
-    idx_handler = deque(create_chunks(epoch_idx, 20))
+    from matplotlib.collections import LineCollection
+    from matplotlib.colors import colorConverter
+    epochs = params['epochs']
 
     if picks is None:
-        if any('ICA' in k for k in epochs.ch_names):
-            picks = pick_types(epochs.info, misc=True, ref_meg=False,
-                               exclude=[])
-        else:
-            picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
-                               exclude=[])
+        picks = _handle_picks(epochs)
     if len(picks) < 1:
         raise RuntimeError('No appropriate channels found. Please'
                            ' check your picks')
-    times = epochs.times * 1e3
-    n_channels = epochs.info['nchan']
-    types = [channel_type(epochs.info, idx) for idx in
-             picks]
-
-    # preallocation needed for min / max scaling
-    data = np.zeros((len(epochs.events), n_channels, len(times)))
-    for ii, epoch in enumerate(epochs.get_data()):
-        for jj, (this_type, this_channel) in enumerate(zip(types, epoch)):
-            data[ii, jj] = this_channel / scalings[this_type]
-
-    n_events = len(epochs.events)
-    epoch_idx = epoch_idx[:n_events]
-    idx_handler = deque(create_chunks(epoch_idx, 20))
-    # handle bads
-    bad_ch_idx = None
-    ch_names = epochs.ch_names
-    bads = epochs.info['bads']
-    if any([ch_names[k] in bads for k in picks]):
-        ch_picked = [k for k in ch_names if ch_names.index(k) in picks]
-        bad_ch_idx = [ch_picked.index(k) for k in bads if k in ch_names]
-        good_ch_idx = [p for p in picks if p not in bad_ch_idx]
+    picks = sorted(picks)
+    # Reorganize channels
+    inds = list()
+    types = list()
+    for t in ['grad', 'mag']:
+        idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
+        if len(idxs) < 1:
+            continue
+        mask = _in1d(idxs, picks, assume_unique=True)
+        inds.append(idxs[mask])
+        types += [t] * len(inds[-1])
+    pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
+    if order is None:
+        order = ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp', 'misc',
+                 'chpi', 'syst', 'ias', 'exci']
+    for ch_type in order:
+        pick_kwargs[ch_type] = True
+        idxs = pick_types(params['info'], **pick_kwargs)
+        if len(idxs) < 1:
+            continue
+        mask = _in1d(idxs, picks, assume_unique=True)
+        inds.append(idxs[mask])
+        types += [ch_type] * len(inds[-1])
+        pick_kwargs[ch_type] = False
+    inds = np.concatenate(inds).astype(int)
+    if not len(inds) == len(picks):
+        raise RuntimeError('Some channels not classified. Please'
+                           ' check your picks')
+    ch_names = [params['info']['ch_names'][x] for x in inds]
+
+    # set up plotting
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    n_epochs = min(n_epochs, len(epochs.events))
+    duration = len(epochs.times) * n_epochs
+    n_channels = min(n_channels, len(picks))
+    if size is not None:
+        size = size.split(',')
+        size = tuple(float(s) for s in size)
+    if title is None:
+        title = epochs.name
+        if epochs.name is None or len(title) == 0:
+            title = ''
+    fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
+    fig.canvas.set_window_title('mne_browse_epochs')
+    ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
+
+    ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
+                ha='center', va='bottom', size=12, xycoords='axes fraction',
+                textcoords='offset points')
+    color = _handle_default('color', None)
+
+    ax.axis([0, duration, 0, 200])
+    ax2 = ax.twiny()
+    ax2.set_zorder(-1)
+    ax2.axis([0, duration, 0, 200])
+    ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Epochs')
+    ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
+                                               facecolor='w', zorder=2))
+
+    ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
+    help_button = mpl.widgets.Button(ax_help_button, 'Help')
+    help_button.on_clicked(partial(_onclick_help, params=params))
+
+    # populate vertical and horizontal scrollbars
+    for ci in range(len(picks)):
+        if ch_names[ci] in params['info']['bads']:
+            this_color = params['bad_color']
+        else:
+            this_color = color[types[ci]]
+        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
+                                                   facecolor=this_color,
+                                                   edgecolor=this_color,
+                                                   zorder=3))
+
+    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
+                                       edgecolor='w', facecolor='w', zorder=4)
+    ax_vscroll.add_patch(vsel_patch)
+
+    ax_vscroll.set_ylim(len(types), 0)
+    ax_vscroll.set_title('Ch.')
+
+    # populate colors list
+    type_colors = [colorConverter.to_rgba(color[c]) for c in types]
+    colors = list()
+    for color_idx in range(len(type_colors)):
+        colors.append([type_colors[color_idx]] * len(epochs.events))
+    lines = list()
+    n_times = len(epochs.times)
+
+    for ch_idx in range(n_channels):
+        if len(colors) - 1 < ch_idx:
+            break
+        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                            zorder=2, picker=3.)
+        ax.add_collection(lc)
+        lines.append(lc)
+
+    times = epochs.times
+    data = np.zeros((params['info']['nchan'], len(times) * len(epochs.events)))
+
+    ylim = (25., 0.)  # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
+    # make shells for plotting traces
+    offset = ylim[0] / n_channels
+    offsets = np.arange(n_channels) * offset + (offset / 2.)
+
+    times = np.arange(len(data[0]))
+    epoch_times = np.arange(0, len(times), n_times)
+
+    ax.set_yticks(offsets)
+    ax.set_ylim(ylim)
+    ticks = epoch_times + 0.5 * n_times
+    ax.set_xticks(ticks)
+    ax2.set_xticks(ticks[:n_epochs])
+    labels = list(range(1, len(ticks) + 1))  # epoch numbers
+    ax.set_xticklabels(labels)
+    ax2.set_xticklabels(labels)
+    xlim = epoch_times[-1] + len(epochs.times)
+    ax_hscroll.set_xlim(0, xlim)
+    vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
+
+    # fit horizontal scroll bar ticks
+    hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
+    hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
+    hticks = list()
+    for tick in hscroll_ticks:
+        hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
+    hlabels = [x / n_times + 1 for x in hticks]
+    ax_hscroll.set_xticks(hticks)
+    ax_hscroll.set_xticklabels(hlabels)
+
+    for epoch_idx in range(len(epoch_times)):
+        ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
+                                                   n_times, 1, facecolor='w',
+                                                   edgecolor='w', alpha=0.6))
+    hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
+                                       edgecolor='k',
+                                       facecolor=(0.75, 0.75, 0.75),
+                                       alpha=0.25, linewidth=1, clip_on=False)
+    ax_hscroll.add_patch(hsel_patch)
+    text = ax.text(0, 0, 'blank', zorder=2, verticalalignment='baseline',
+                   ha='left', fontweight='bold')
+    text.set_visible(False)
+
+    params.update({'fig': fig,
+                   'ax': ax,
+                   'ax2': ax2,
+                   'ax_hscroll': ax_hscroll,
+                   'ax_vscroll': ax_vscroll,
+                   'vsel_patch': vsel_patch,
+                   'hsel_patch': hsel_patch,
+                   'lines': lines,
+                   'projs': projs,
+                   'ch_names': ch_names,
+                   'n_channels': n_channels,
+                   'n_epochs': n_epochs,
+                   'scalings': scalings,
+                   'duration': duration,
+                   'ch_start': 0,
+                   'colors': colors,
+                   'def_colors': type_colors,  # don't change at runtime
+                   'picks': picks,
+                   'bads': np.array(list(), dtype=int),
+                   'data': data,
+                   'times': times,
+                   'epoch_times': epoch_times,
+                   'offsets': offsets,
+                   'labels': labels,
+                   'scale_factor': 1.0,
+                   'butterfly_scale': 1.0,
+                   'fig_proj': None,
+                   'types': np.array(types),
+                   'inds': inds,
+                   'vert_lines': list(),
+                   'vertline_t': vertline_t,
+                   'butterfly': False,
+                   'text': text,
+                   'ax_help_button': ax_help_button,  # needed for positioning
+                   'help_button': help_button,  # reference needed for clicks
+                   'fig_options': None,
+                   'settings': [True, True, True, True],
+                   'image_plot': None})
+
+    params['plot_fun'] = partial(_plot_traces, params=params)
+
+    if len(projs) > 0 and not epochs.proj:
+        ax_button = plt.subplot2grid((10, 15), (9, 14))
+        opt_button = mpl.widgets.Button(ax_button, 'Proj')
+        callback_option = partial(_toggle_options, params=params)
+        opt_button.on_clicked(callback_option)
+        params['opt_button'] = opt_button
+        params['ax_button'] = ax_button
+
+    # callbacks
+    callback_scroll = partial(_plot_onscroll, params=params)
+    fig.canvas.mpl_connect('scroll_event', callback_scroll)
+    callback_click = partial(_mouse_click, params=params)
+    fig.canvas.mpl_connect('button_press_event', callback_click)
+    callback_key = partial(_plot_onkey, params=params)
+    fig.canvas.mpl_connect('key_press_event', callback_key)
+    callback_resize = partial(_resize_event, params=params)
+    fig.canvas.mpl_connect('resize_event', callback_resize)
+    fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
+
+    # Draw event lines for the first time.
+    _plot_vert_lines(params)
+
+    # As this code is shared with plot_evoked, some extra steps are needed:
+    # first the actual plot update function
+    params['plot_update_proj_callback'] = _plot_update_epochs_proj
+    # then the toggle handler
+    callback_proj = partial(_toggle_proj, params=params)
+    # store these for use by callbacks in the options figure
+    params['callback_proj'] = callback_proj
+    params['callback_key'] = callback_key
+
+    callback_proj('none')
+    _layout_figure(params)
+
+
+def _plot_traces(params):
+    """ Helper for plotting concatenated epochs """
+    params['text'].set_visible(False)
+    ax = params['ax']
+    butterfly = params['butterfly']
+    if butterfly:
+        ch_start = 0
+        n_channels = len(params['picks'])
+        data = params['data'] * params['butterfly_scale']
     else:
-        good_ch_idx = np.arange(n_channels)
+        ch_start = params['ch_start']
+        n_channels = params['n_channels']
+        data = params['data'] * params['scale_factor']
+    offsets = params['offsets']
+    lines = params['lines']
+    epochs = params['epochs']
 
-    fig, axes = _prepare_trellis(len(data[idx_handler[0]]), max_col=5)
-    axes_handler = deque(list(range(len(idx_handler))))
-    for ii, data_, ax in zip(idx_handler[0], data[idx_handler[0]], axes):
-        ax.plot(times, data_[good_ch_idx].T, color='k')
-        if bad_ch_idx is not None:
-            ax.plot(times, data_[bad_ch_idx].T, color='r')
-        if title_str is not None:
-            ax.set_title(title_str % ii, fontsize=12)
-        ax.set_ylim(data.min(), data.max())
-        ax.set_yticks([])
-        ax.set_xticks([])
-        vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False}
-
-    # initialize memory
-    for this_view, this_inds in zip(axes_handler, idx_handler):
-        for ii, ax in zip(this_inds, axes):
-            vars(ax)[this_view] = {'idx': ii, 'reject': False}
-
-    tight_layout(fig=fig)
-    navigation = figure_nobar(figsize=(3, 1.5))
-    from matplotlib import gridspec
-    gs = gridspec.GridSpec(2, 2)
-    ax1 = plt.subplot(gs[0, 0])
-    ax2 = plt.subplot(gs[0, 1])
-    ax3 = plt.subplot(gs[1, :])
-
-    params = {
-        'fig': fig,
-        'idx_handler': idx_handler,
-        'epochs': epochs,
-        'picks': picks,
-        'times': times,
-        'scalings': scalings,
-        'good_ch_idx': good_ch_idx,
-        'bad_ch_idx': bad_ch_idx,
-        'axes': axes,
-        'back': mpl.widgets.Button(ax1, 'back'),
-        'next': mpl.widgets.Button(ax2, 'next'),
-        'reject-quit': mpl.widgets.Button(ax3, 'reject-quit'),
-        'title_str': title_str,
-        'reject_idx': [],
-        'axes_handler': axes_handler,
-        'data': data
-    }
-    fig.canvas.mpl_connect('button_press_event',
-                           partial(_epochs_axes_onclick, params=params))
-    navigation.canvas.mpl_connect('button_press_event',
-                                  partial(_epochs_navigation_onclick,
-                                          params=params))
-    if show is True:
-        plt.show(block=block)
-    return fig
+    n_times = len(epochs.times)
+    tick_list = list()
+    start_idx = int(params['t_start'] / n_times)
+    end = params['t_start'] + params['duration']
+    end_idx = int(end / n_times)
+    xlabels = params['labels'][start_idx:]
+    event_ids = params['epochs'].events[:, 2]
+    params['ax2'].set_xticklabels(event_ids[start_idx:])
+    ax.set_xticklabels(xlabels)
+    ylabels = ax.yaxis.get_ticklabels()
+    # do the plotting
+    for line_idx in range(n_channels):
+        ch_idx = line_idx + ch_start
+        if line_idx >= len(lines):
+            break
+        elif ch_idx < len(params['ch_names']):
+            if butterfly:
+                ch_type = params['types'][ch_idx]
+                if ch_type == 'grad':
+                    offset = offsets[0]
+                elif ch_type == 'mag':
+                    offset = offsets[1]
+                elif ch_type == 'eeg':
+                    offset = offsets[2]
+                elif ch_type == 'eog':
+                    offset = offsets[3]
+                elif ch_type == 'ecg':
+                    offset = offsets[4]
+                else:
+                    lines[line_idx].set_segments(list())
+            else:
+                tick_list += [params['ch_names'][ch_idx]]
+                offset = offsets[line_idx]
+            this_data = data[ch_idx][params['t_start']:end]
+
+            # subtraction here gets correct orientation for flipped ylim
+            ydata = offset - this_data
+            xdata = params['times'][:params['duration']]
+            num_epochs = np.min([params['n_epochs'],
+                                len(epochs.events)])
+            segments = np.split(np.array((xdata, ydata)).T, num_epochs)
+
+            ch_name = params['ch_names'][ch_idx]
+            if ch_name in params['info']['bads']:
+                if not butterfly:
+                    this_color = params['bad_color']
+                    ylabels[line_idx].set_color(this_color)
+                this_color = np.tile((params['bad_color']), (num_epochs, 1))
+                for bad_idx in params['bads']:
+                    if bad_idx < start_idx or bad_idx > end_idx:
+                        continue
+                    this_color[bad_idx - start_idx] = (1., 0., 0.)
+                lines[line_idx].set_zorder(1)
+            else:
+                this_color = params['colors'][ch_idx][start_idx:end_idx]
+                lines[line_idx].set_zorder(2)
+                if not butterfly:
+                    ylabels[line_idx].set_color('black')
+            lines[line_idx].set_segments(segments)
+            lines[line_idx].set_color(this_color)
+        else:
+            lines[line_idx].set_segments(list())
+
+    # finalize plot
+    ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
+                False)
+    params['ax2'].set_xlim(params['times'][0],
+                           params['times'][0] + params['duration'], False)
+    if butterfly:
+        factor = -1. / params['butterfly_scale']
+        labels = np.empty(20, dtype='S15')
+        labels.fill('')
+        ticks = ax.get_yticks()
+        idx_offset = 1
+        if 'grad' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
+                                               params['scalings']['grad'] *
+                                               1e13 * factor)
+            idx_offset += 4
+        if 'mag' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
+                                               params['scalings']['mag'] *
+                                               1e15 * factor)
+            idx_offset += 4
+        if 'eeg' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
+                                               params['scalings']['eeg'] *
+                                               1e6 * factor)
+            idx_offset += 4
+        if 'eog' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
+                                               params['scalings']['eog'] *
+                                               1e6 * factor)
+            idx_offset += 4
+        if 'ecg' in params['types']:
+            labels[idx_offset + 1] = '0.00'
+            for idx in [idx_offset, idx_offset + 2]:
+                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
+                                               params['scalings']['ecg'] *
+                                               1e6 * factor)
+        ax.set_yticklabels(labels, fontsize=12, color='black')
+    else:
+        ax.set_yticklabels(tick_list, fontsize=12)
+    params['vsel_patch'].set_y(ch_start)
+    params['fig'].canvas.draw()
+    # XXX This is a hack to make sure this figure gets drawn last
+    # so that when matplotlib goes to calculate bounds we don't get a
+    # CGContextRef error on the MacOSX backend :(
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _plot_update_epochs_proj(params, bools):
+    """Helper only needs to be called when proj is changed"""
+    if bools is not None:
+        inds = np.where(bools)[0]
+        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                                   for ii in inds]
+        params['proj_bools'] = bools
+    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
+                                        verbose=False)
+
+    data = params['orig_data']
+    if params['projector'] is not None:
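+        # apply the stacked SSP projection operator to the concatenated data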
+        data = np.dot(params['projector'], data)
+    types = params['types']
+    for pick, ind in enumerate(params['inds']):
+        params['data'][pick] = data[ind] / params['scalings'][types[pick]]
+    params['plot_fun']()
+
+
+def _handle_picks(epochs):
+    """Aux function to handle picks."""
+    if any('ICA' in k for k in epochs.ch_names):
+        picks = pick_types(epochs.info, misc=True, ref_meg=False,
+                           exclude=[])
+    else:
+        picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
+                           ref_meg=False, exclude=[])
+    return picks
+
+
+def _plot_window(value, params):
+    """Deal with horizontal shift of the viewport."""
+    max_times = len(params['times']) - params['duration']
+    if value > max_times:
+        value = max_times
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+        params['plot_fun']()
+
+
+def _plot_vert_lines(params):
+    """ Helper function for plotting vertical lines."""
+    ax = params['ax']
+    while len(ax.lines) > 0:
+        ax.lines.pop()
+    params['vert_lines'] = list()
+    params['vertline_t'].set_text('')
+
+    epochs = params['epochs']
+    if params['settings'][3]:  # if zeroline visible
+        t_zero = np.where(epochs.times == 0.)[0]
+        if len(t_zero) == 1:
+            for event_idx in range(len(epochs.events)):
+                pos = [event_idx * len(epochs.times) + t_zero[0],
+                       event_idx * len(epochs.times) + t_zero[0]]
+                ax.plot(pos, ax.get_ylim(), 'g', zorder=3, alpha=0.4)
+    for epoch_idx in range(len(epochs.events)):
+        pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
+        ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=1)
+
+
+def _pick_bad_epochs(event, params):
+    """Helper for selecting / dropping bad epochs"""
+    if 'ica' in params:
+        pos = (event.xdata, event.ydata)
+        _pick_bad_channels(pos, params)
+        return
+    n_times = len(params['epochs'].times)
+    start_idx = int(params['t_start'] / n_times)
+    xdata = event.xdata
+    xlim = event.inaxes.get_xlim()
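+    # each visible epoch spans xlim[1] / n_epochs x-units; map click to epoch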
+    epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
+    total_epochs = len(params['epochs'].events)
+    if epoch_idx > total_epochs - 1:
+        return
+    # remove bad epoch
+    if epoch_idx in params['bads']:
+        params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
+        for ch_idx in range(len(params['ch_names'])):
+            params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
+        params['ax_hscroll'].patches[epoch_idx].set_color('w')
+        params['ax_hscroll'].patches[epoch_idx].set_zorder(1)
+        params['plot_fun']()
+        return
+    # add bad epoch
+    params['bads'] = np.append(params['bads'], epoch_idx)
+    params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
+    params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
+    params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
+    for ch_idx in range(len(params['ch_names'])):
+        params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
+    params['plot_fun']()
+
+
+def _pick_bad_channels(pos, params):
+    """Helper function for selecting bad channels."""
+    text, ch_idx = _label2idx(params, pos)
+    if text is None:
+        return
+    if text in params['info']['bads']:
+        while text in params['info']['bads']:
+            params['info']['bads'].remove(text)
+        color = params['def_colors'][ch_idx]
+        params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
+    else:
+        params['info']['bads'].append(text)
+        color = params['bad_color']
+        params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
+    if 'ica' in params:
+        params['plot_fun']()
+    else:
+        params['plot_update_proj_callback'](params, None)
+
+
+def _plot_onscroll(event, params):
+    """Function to handle scroll events."""
+    if event.key == 'control':
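+        # fake a +/- keypress so the scaling key handler can be reused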
+        if event.step < 0:
+            event.key = '-'
+        else:
+            event.key = '+'
+        _plot_onkey(event, params)
+        return
+    if params['butterfly']:
+        return
+    _plot_raw_onscroll(event, params, len(params['ch_names']))
+
+
+def _mouse_click(event, params):
+    """Function to handle mouse click events."""
+    if event.inaxes is None:
+        if params['butterfly'] or not params['settings'][0]:
+            return
+        ax = params['ax']
+        ylim = ax.get_ylim()
+        pos = ax.transData.inverted().transform((event.x, event.y))
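+        # only clicks left of the y-axis (on the channel labels) fall through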
+        if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
+            return
+        if event.button == 1:  # left click
+            params['label_click_fun'](pos)
+        elif event.button == 3:  # right click
+            if 'ica' not in params:
+                _, ch_idx = _label2idx(params, pos)
+                if ch_idx is None:
+                    return
+                if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
+                                                                'eeg', 'eog']:
+                    logger.info('Event related fields / potentials only '
+                                'available for MEG and EEG channels.')
+                    return
+                fig = plot_epochs_image(params['epochs'],
+                                        picks=params['inds'][ch_idx],
+                                        fig=params['image_plot'])[0]
+                params['image_plot'] = fig
+    elif event.button == 1:  # left click
+        # vertical scroll bar changed
+        if event.inaxes == params['ax_vscroll']:
+            if params['butterfly']:
+                return
+            ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+            if params['ch_start'] != ch_start:
+                params['ch_start'] = ch_start
+                params['plot_fun']()
+        # horizontal scroll bar changed
+        elif event.inaxes == params['ax_hscroll']:
+            # find the closest epoch time
+            times = params['epoch_times']
+            offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
+            xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
+            _plot_window(xdata, params)
+        # main axes
+        elif event.inaxes == params['ax']:
+            _pick_bad_epochs(event, params)
+
+    elif event.inaxes == params['ax'] and event.button == 2:  # middle click
+        params['fig'].canvas.draw()
+        if params['fig_proj'] is not None:
+            params['fig_proj'].canvas.draw()
+    elif event.inaxes == params['ax'] and event.button == 3:  # right click
+        n_times = len(params['epochs'].times)
+        xdata = int(event.xdata % n_times)
+        prev_xdata = 0
+        if len(params['vert_lines']) > 0:
+            prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
+            while len(params['vert_lines']) > 0:
+                params['ax'].lines.remove(params['vert_lines'][0][0])
+                params['vert_lines'].pop(0)
+        if prev_xdata == xdata:  # lines removed
+            params['vertline_t'].set_text('')
+            params['plot_fun']()
+            return
+        ylim = params['ax'].get_ylim()
+        for epoch_idx in range(params['n_epochs']):  # plot lines
+            pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
+            params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
+                                                          zorder=4))
+        params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
+        params['plot_fun']()
+
+
+def _plot_onkey(event, params):
+    """Function to handle key presses."""
+    import matplotlib.pyplot as plt
+    if event.key == 'down':
+        if params['butterfly']:
+            return
+        params['ch_start'] += params['n_channels']
+        _channels_changed(params, len(params['ch_names']))
+    elif event.key == 'up':
+        if params['butterfly']:
+            return
+        params['ch_start'] -= params['n_channels']
+        _channels_changed(params, len(params['ch_names']))
+    elif event.key == 'left':
+        sample = params['t_start'] - params['duration']
+        sample = np.max([0, sample])
+        _plot_window(sample, params)
+    elif event.key == 'right':
+        sample = params['t_start'] + params['duration']
+        sample = np.min([sample, params['times'][-1] - params['duration']])
+        times = params['epoch_times']
+        xdata = times.flat[np.abs(times - sample).argmin()]
+        _plot_window(xdata, params)
+    elif event.key == '-':
+        if params['butterfly']:
+            params['butterfly_scale'] /= 1.1
+        else:
+            params['scale_factor'] /= 1.1
+        params['plot_fun']()
+    elif event.key in ['+', '=']:
+        if params['butterfly']:
+            params['butterfly_scale'] *= 1.1
+        else:
+            params['scale_factor'] *= 1.1
+        params['plot_fun']()
+    elif event.key == 'f11':
+        mng = plt.get_current_fig_manager()
+        mng.full_screen_toggle()
+    elif event.key == 'pagedown':
+        if params['n_channels'] == 1 or params['butterfly']:
+            return
+        n_channels = params['n_channels'] - 1
+        ylim = params['ax'].get_ylim()
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].collections.pop()
+        params['ax'].set_yticks(params['offsets'])
+        params['lines'].pop()
+        params['vsel_patch'].set_height(n_channels)
+        params['plot_fun']()
+    elif event.key == 'pageup':
+        if params['butterfly']:
+            return
+        from matplotlib.collections import LineCollection
+        n_channels = params['n_channels'] + 1
+        ylim = params['ax'].get_ylim()
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                            zorder=2, picker=3.)
+        params['ax'].add_collection(lc)
+        params['ax'].set_yticks(params['offsets'])
+        params['lines'].append(lc)
+        params['vsel_patch'].set_height(n_channels)
+        params['plot_fun']()
+    elif event.key == 'home':
+        n_epochs = params['n_epochs'] - 1
+        if n_epochs <= 0:
+            return
+        n_times = len(params['epochs'].times)
+        ticks = params['epoch_times'] + 0.5 * n_times
+        params['ax2'].set_xticks(ticks[:n_epochs])
+        params['n_epochs'] = n_epochs
+        params['duration'] -= n_times
+        params['hsel_patch'].set_width(params['duration'])
+        params['plot_fun']()
+    elif event.key == 'end':
+        n_epochs = params['n_epochs'] + 1
+        n_times = len(params['epochs'].times)
+        if n_times * n_epochs > len(params['data'][0]):
+            return
+        if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
+            params['t_start'] -= n_times
+            params['hsel_patch'].set_x(params['t_start'])
+        ticks = params['epoch_times'] + 0.5 * n_times
+        params['ax2'].set_xticks(ticks[:n_epochs])
+        params['n_epochs'] = n_epochs
+        if len(params['vert_lines']) > 0:
+            ax = params['ax']
+            pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
+            params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
+                                                zorder=3))
+        params['duration'] += n_times
+        if params['t_start'] + params['duration'] > len(params['data'][0]):
+            params['t_start'] -= n_times
+            params['hsel_patch'].set_x(params['t_start'])
+        params['hsel_patch'].set_width(params['duration'])
+        params['plot_fun']()
+    elif event.key == 'b':
+        if params['fig_options'] is not None:
+            plt.close(params['fig_options'])
+            params['fig_options'] = None
+        _prepare_butterfly(params)
+        _plot_traces(params)
+    elif event.key == 'o':
+        if not params['butterfly']:
+            _open_options(params)
+    elif event.key == 'h':
+        _plot_histogram(params)
+    elif event.key == '?':
+        _onclick_help(event, params)
+    elif event.key == 'escape':
+        plt.close(params['fig'])
+
+
+def _prepare_butterfly(params):
+    """Helper function for setting up butterfly plot."""
+    from matplotlib.collections import LineCollection
+    butterfly = not params['butterfly']
+    if butterfly:
+        types = set(['grad', 'mag', 'eeg', 'eog',
+                     'ecg']) & set(params['types'])
+        if len(types) < 1:
+            return
+        params['ax_vscroll'].set_visible(False)
+        ax = params['ax']
+        labels = ax.yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(True)
+        ylim = (5. * len(types), 0.)
+        ax.set_ylim(ylim)
+        offset = ylim[0] / (4. * len(types))
+        ticks = np.arange(0, ylim[0], offset)
+        ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
+        ax.set_yticks(ticks)
+        used_types = 0
+        params['offsets'] = [ticks[2]]
+        if 'grad' in types:
+            pos = (0, 1 - (ticks[2] / ylim[0]))
+            params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'mag' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'eeg' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'eog' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+        params['offsets'].append(ticks[2 + used_types * 4])
+        if 'ecg' in types:
+            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
+            params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
+                                   ha='left', size=12, va='center',
+                                   xycoords='axes fraction', rotation=90,
+                                   textcoords='offset points')
+            used_types += 1
+
+        while len(params['lines']) < len(params['picks']):
+            lc = LineCollection(list(), antialiased=False, linewidths=0.5,
+                                zorder=2, picker=3.)
+            ax.add_collection(lc)
+            params['lines'].append(lc)
+    else:  # change back to default view
+        labels = params['ax'].yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][0])
+        params['ax_vscroll'].set_visible(True)
+        while len(params['ax2'].texts) > 0:
+            params['ax2'].texts.pop()
+        n_channels = params['n_channels']
+        while len(params['lines']) > n_channels:
+            params['ax'].collections.pop()
+            params['lines'].pop()
+        ylim = (25., 0.)
+        params['ax'].set_ylim(ylim)
+        offset = ylim[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['ax'].set_yticks(params['offsets'])
+    params['butterfly'] = butterfly
+
+
+def _onpick(event, params):
+    """Helper to add a channel name on click"""
+    if event.mouseevent.button != 2 or not params['butterfly']:
+        return  # text label added with a middle mouse button
+    lidx = np.where([l is event.artist for l in params['lines']])[0][0]
+    text = params['text']
+    text.set_x(event.mouseevent.xdata)
+    text.set_y(event.mouseevent.ydata)
+    text.set_text(params['ch_names'][lidx])
+    text.set_visible(True)
+    # do NOT redraw here, since for butterfly plots hundreds of lines could
+    # potentially be picked -- use _mouse_click (happens once per click)
+    # to do the drawing
+
+
+def _close_event(event, params):
+    """Function to drop selected bad epochs. Called on closing of the plot."""
+    params['epochs'].drop_epochs(params['bads'])
+    params['epochs'].info['bads'] = params['info']['bads']
+    logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
+
+
+def _resize_event(event, params):
+    """Function to handle resize event"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_figure(params)
+
+
+def _update_channels_epochs(event, params):
+    """Function for changing the amount of channels and epochs per view."""
+    from matplotlib.collections import LineCollection
+    # Channels
+    n_channels = int(np.around(params['channel_slider'].val))
+    offset = params['ax'].get_ylim()[0] / n_channels
+    params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+    while len(params['lines']) > n_channels:
+        params['ax'].collections.pop()
+        params['lines'].pop()
+    while len(params['lines']) < n_channels:
+        lc = LineCollection(list(), linewidths=0.5, antialiased=False,
+                            zorder=2, picker=3.)
+        params['ax'].add_collection(lc)
+        params['lines'].append(lc)
+    params['ax'].set_yticks(params['offsets'])
+    params['vsel_patch'].set_height(n_channels)
+    params['n_channels'] = n_channels
+
+    # Epochs
+    n_epochs = int(np.around(params['epoch_slider'].val))
+    n_times = len(params['epochs'].times)
+    ticks = params['epoch_times'] + 0.5 * n_times
+    params['ax2'].set_xticks(ticks[:n_epochs])
+    params['n_epochs'] = n_epochs
+    params['duration'] = n_times * n_epochs
+    params['hsel_patch'].set_width(params['duration'])
+    if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
+        params['t_start'] = len(params['data'][0]) - n_times * n_epochs
+        params['hsel_patch'].set_x(params['t_start'])
+    _plot_traces(params)
+
+
+def _toggle_labels(label, params):
+    """Function for toggling axis labels on/off."""
+    if label == 'Channel names visible':
+        params['settings'][0] = not params['settings'][0]
+        labels = params['ax'].yaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][0])
+    elif label == 'Event-id visible':
+        params['settings'][1] = not params['settings'][1]
+        labels = params['ax2'].xaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][1])
+    elif label == 'Epoch-id visible':
+        params['settings'][2] = not params['settings'][2]
+        labels = params['ax'].xaxis.get_ticklabels()
+        for label in labels:
+            label.set_visible(params['settings'][2])
+    elif label == 'Zeroline visible':
+        params['settings'][3] = not params['settings'][3]
+        _plot_vert_lines(params)
+    params['fig'].canvas.draw()
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _open_options(params):
+    """Function for opening the option window."""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    if params['fig_options'] is not None:
+        # turn off options dialog
+        plt.close(params['fig_options'])
+        params['fig_options'] = None
+        return
+    width = 10
+    height = 3
+    fig_options = figure_nobar(figsize=(width, height), dpi=80)
+    fig_options.canvas.set_window_title('View settings')
+    params['fig_options'] = fig_options
+    ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
+    ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
+    ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
+    ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
+    plt.axis('off')
+    params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
+    params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
+                                                  len(params['ch_names']),
+                                                  valfmt='%0.0f',
+                                                  valinit=params['n_channels'])
+    params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
+                                                len(params['epoch_times']),
+                                                valfmt='%0.0f',
+                                                valinit=params['n_epochs'])
+    params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
+                                                  ['Channel names visible',
+                                                   'Event-id visible',
+                                                   'Epoch-id visible',
+                                                   'Zeroline visible'],
+                                                  actives=params['settings'])
+    update = partial(_update_channels_epochs, params=params)
+    params['update_button'].on_clicked(update)
+    labels_callback = partial(_toggle_labels, params=params)
+    params['checkbox'].on_clicked(labels_callback)
+    close_callback = partial(_settings_closed, params=params)
+    params['fig_options'].canvas.mpl_connect('close_event', close_callback)
+    try:
+        params['fig_options'].canvas.draw()
+        params['fig_options'].show()
+        if params['fig_proj'] is not None:
+            params['fig_proj'].canvas.draw()
+    except Exception:
+        pass
+
+
+def _settings_closed(events, params):
+    """Function to handle close event from settings dialog."""
+    params['fig_options'] = None
+
+
+def _plot_histogram(params):
+    """Function for plotting histogram of peak-to-peak values."""
+    import matplotlib.pyplot as plt
+    epochs = params['epochs']
+    p2p = np.ptp(epochs.get_data(), axis=2)
+    types = list()
+    data = list()
+    if 'eeg' in params['types']:
+        eegs = np.array([p2p.T[i] for i,
+                         x in enumerate(params['types']) if x == 'eeg'])
+        data.append(eegs.ravel())
+        types.append('eeg')
+    if 'mag' in params['types']:
+        mags = np.array([p2p.T[i] for i,
+                         x in enumerate(params['types']) if x == 'mag'])
+        data.append(mags.ravel())
+        types.append('mag')
+    if 'grad' in params['types']:
+        grads = np.array([p2p.T[i] for i,
+                          x in enumerate(params['types']) if x == 'grad'])
+        data.append(grads.ravel())
+        types.append('grad')
+    fig = plt.figure(len(types))
+    fig.clf()
+    scalings = _handle_default('scalings')
+    units = _handle_default('units')
+    titles = _handle_default('titles')
+    colors = _handle_default('color')
+    for idx in range(len(types)):
+        ax = plt.subplot(len(types), 1, idx + 1)
+        plt.xlabel(units[types[idx]])
+        plt.ylabel('count')
+        color = colors[types[idx]]
+        rej = None
+        if epochs.reject is not None and types[idx] in epochs.reject.keys():
+            rej = epochs.reject[types[idx]] * scalings[types[idx]]
+            rng = [0., rej * 1.1]
+        else:
+            rng = None
+        plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
+                 range=rng)
+        if rej is not None:
+            ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
+        plt.title(titles[types[idx]])
+    fig.suptitle('Peak-to-peak histogram', y=0.99)
+    fig.subplots_adjust(hspace=0.6)
+    try:
+        fig.show()
+    except Exception:
+        pass
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
+
+
+def _label2idx(params, pos):
+    """Aux function for click on labels. Returns channel name and idx."""
+    labels = params['ax'].yaxis.get_ticklabels()
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1])
+    text = labels[line_idx].get_text()
+    if len(text) == 0:
+        return None, None
+    ch_idx = params['ch_start'] + line_idx
+    return text, ch_idx
diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py
index 22bc620..f929fd5 100644
--- a/mne/viz/evoked.py
+++ b/mne/viz/evoked.py
@@ -15,16 +15,114 @@ from itertools import cycle
 
 import numpy as np
 
-from ..io.pick import channel_type
+from ..io.pick import channel_type, pick_types, _picks_by_type
 from ..externals.six import string_types
-from .utils import _mutable_defaults, _check_delayed_ssp
-from .utils import _draw_proj_checkbox, tight_layout
+from ..defaults import _handle_default
+from .utils import _draw_proj_checkbox, tight_layout, _check_delayed_ssp
+from ..utils import logger
+from ..fixes import partial
+from ..io.pick import pick_info
+from .topo import _plot_evoked_topo
+from .topomap import _prepare_topo_plot, plot_topomap
+
+
+def _butterfly_onpick(event, params):
+    """Helper to add a channel name on click"""
+    params['need_draw'] = True
+    ax = event.artist.get_axes()
+    ax_idx = np.where([ax is a for a in params['axes']])[0][0]
+    lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
+    ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
+    text = params['texts'][ax_idx]
+    x = event.artist.get_xdata()[event.ind[0]]
+    y = event.artist.get_ydata()[event.ind[0]]
+    text.set_x(x)
+    text.set_y(y)
+    text.set_text(ch_name)
+    text.set_color(event.artist.get_color())
+    text.set_alpha(1.)
+    text.set_path_effects(params['path_effects'])
+    # do NOT redraw here, since for butterfly plots hundreds of lines could
+    # potentially be picked -- use on_button_press (happens once per click)
+    # to do the drawing
+
+
+def _butterfly_on_button_press(event, params):
+    """Helper to only draw once for picking"""
+    if params['need_draw']:
+        event.canvas.draw()
+    else:
+        idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
+        if len(idx) == 1:
+            text = params['texts'][idx[0]]
+            text.set_alpha(0.)
+            text.set_path_effects([])
+            event.canvas.draw()
+    params['need_draw'] = False
+
+
+def _butterfly_onselect(xmin, xmax, ch_types, evoked, text=None):
+    """Function for drawing topomaps from the selected area."""
+    import matplotlib.pyplot as plt
+    vert_lines = list()
+    if text is not None:
+        text.set_visible(True)
+        ax = text.axes
+        ylim = ax.get_ylim()
+        vert_lines.append(ax.plot([xmin, xmin], ylim, zorder=0, color='red'))
+        vert_lines.append(ax.plot([xmax, xmax], ylim, zorder=0, color='red'))
+        fill = ax.fill_betweenx(ylim, x1=xmin, x2=xmax, alpha=0.2,
+                                color='green')
+        evoked_fig = plt.gcf()
+        evoked_fig.canvas.draw()
+        evoked_fig.canvas.flush_events()
+    times = evoked.times
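+    # the butterfly x-axis is in milliseconds; convert the span to seconds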
+    xmin *= 0.001
+    minidx = np.abs(times - xmin).argmin()
+    xmax *= 0.001
+    maxidx = np.abs(times - xmax).argmin()
+    fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
+                              figsize=(3 * len(ch_types), 3))
+    for idx, ch_type in enumerate(ch_types):
+        picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(evoked,
+                                                                 ch_type,
+                                                                 layout=None)
+        data = evoked.data[picks, minidx:maxidx]
+        if merge_grads:
+            from ..channels.layout import _merge_grad_data
+            data = _merge_grad_data(data)
+            title = '%s RMS' % ch_type
+        else:
+            title = ch_type
+        data = np.average(data, axis=1)
+        axarr[0][idx].set_title(title)
+        plot_topomap(data, pos, axis=axarr[0][idx], show=False)
+
+    fig.suptitle('Average over %.2fs - %.2fs' % (xmin, xmax), fontsize=15,
+                 y=0.1)
+    tight_layout(pad=2.0, fig=fig)
+    plt.show()
+    if text is not None:
+        text.set_visible(False)
+        close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
+                                 fill=fill)
+        fig.canvas.mpl_connect('close_event', close_callback)
+        evoked_fig.canvas.draw()
+        evoked_fig.canvas.flush_events()
+
+
+def _topo_closed(events, ax, lines, fill):
+    """Callback for removing lines from evoked plot as topomap is closed."""
+    for line in lines:
+        ax.lines.remove(line[0])
+    ax.collections.remove(fill)
+    ax.get_figure().canvas.draw()
 
 
 def _plot_evoked(evoked, picks, exclude, unit, show,
                  ylim, proj, xlim, hline, units,
                  scalings, titles, axes, plot_type,
-                 cmap=None):
+                 cmap=None, gfp=False):
     """Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
 
     Extra param is:
@@ -37,16 +135,19 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
         interactive.
     """
     import matplotlib.pyplot as plt
+    from matplotlib import patheffects
+    from matplotlib.widgets import SpanSelector
     if axes is not None and proj == 'interactive':
         raise RuntimeError('Currently only single axis figures are supported'
                            ' for interactive SSP selection.')
+    if isinstance(gfp, string_types) and gfp != 'only':
+        raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
 
-    scalings, titles, units = _mutable_defaults(('scalings', scalings),
-                                                ('titles', titles),
-                                                ('units', units))
-
-    channel_types = set(key for d in [scalings, titles, units] for key in d)
-    channel_types = sorted(channel_types)  # to guarantee consistent order
+    scalings = _handle_default('scalings', scalings)
+    titles = _handle_default('titles', titles)
+    units = _handle_default('units', units)
+    # Valid data types ordered for consistency
+    channel_types = ['eeg', 'grad', 'mag', 'seeg']
 
     if picks is None:
         picks = list(range(evoked.info['nchan']))
@@ -56,16 +157,17 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
     if len(exclude) > 0:
         if isinstance(exclude, string_types) and exclude == 'bads':
             exclude = bad_ch_idx
-        elif (isinstance(exclude, list)
-              and all([isinstance(ch, string_types) for ch in exclude])):
+        elif (isinstance(exclude, list) and
+              all(isinstance(ch, string_types) for ch in exclude)):
             exclude = [evoked.ch_names.index(ch) for ch in exclude]
         else:
             raise ValueError('exclude has to be a list of channel names or '
                              '"bads"')
 
         picks = list(set(picks).difference(exclude))
+    picks = np.array(picks)
 
-    types = [channel_type(evoked.info, idx) for idx in picks]
+    types = np.array([channel_type(evoked.info, idx) for idx in picks])
     n_channel_types = 0
     ch_types_used = []
     for t in channel_types:
@@ -96,18 +198,27 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
         evoked = evoked.copy()
         evoked.apply_proj()
 
-    times = 1e3 * evoked.times  # time in miliseconds
+    times = 1e3 * evoked.times  # time in milliseconds
+    texts = list()
+    idxs = list()
+    lines = list()
+    selectors = list()  # for keeping reference to span_selectors
+    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
+                                           alpha=0.75)]
+    gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
+                                               alpha=0.75)]
     for ax, t in zip(axes, ch_types_used):
         ch_unit = units[t]
         this_scaling = scalings[t]
         if unit is False:
             this_scaling = 1.0
             ch_unit = 'NA'  # no unit
-        idx = [picks[i] for i in range(len(picks)) if types[i] == t]
+        idx = list(picks[types == t])
+        idxs.append(idx)
         if len(idx) > 0:
             # Parameters for butterfly interactive plots
             if plot_type == 'butterfly':
-                if any([i in bad_ch_idx for i in idx]):
+                if any(i in bad_ch_idx for i in idx):
                     colors = ['k'] * len(idx)
                     for i in bad_ch_idx:
                         if i in idx:
@@ -116,16 +227,62 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
                     ax._get_lines.color_cycle = iter(colors)
                 else:
                     ax._get_lines.color_cycle = cycle(['k'])
+                text = ax.annotate('Loading...', xy=(0.01, 0.1),
+                                   xycoords='axes fraction', fontsize=20,
+                                   color='green')
+                text.set_visible(False)
+                callback_onselect = partial(_butterfly_onselect,
+                                            ch_types=ch_types_used,
+                                            evoked=evoked, text=text)
+                blit = False if plt.get_backend() == 'MacOSX' else True
+                selectors.append(SpanSelector(ax, callback_onselect,
+                                              'horizontal', minspan=10,
+                                              useblit=blit,
+                                              rectprops=dict(alpha=0.5,
+                                                             facecolor='red')))
             # Set amplitude scaling
             D = this_scaling * evoked.data[idx, :]
-            # plt.axes(ax)
             if plot_type == 'butterfly':
-                ax.plot(times, D.T)
+                gfp_only = (isinstance(gfp, string_types) and gfp == 'only')
+                if not gfp_only:
+                    lines.append(ax.plot(times, D.T, picker=3., zorder=0))
+                    for ii, line in zip(idx, lines[-1]):
+                        if ii in bad_ch_idx:
+                            line.set_zorder(1)
+                if gfp:  # 'only' or boolean True
+                    gfp_color = (0., 1., 0.)
+                    this_gfp = np.sqrt((D * D).mean(axis=0))
+                    this_ylim = ax.get_ylim()
+                    if not gfp_only:
+                        y_offset = this_ylim[0]
+                    else:
+                        y_offset = 0.
+                    this_gfp += y_offset
+                    ax.fill_between(times, y_offset, this_gfp, color='none',
+                                    facecolor=gfp_color, zorder=0, alpha=0.25)
+                    ax.plot(times, this_gfp, color=gfp_color, zorder=2)
+                    ax.text(times[0] + 0.01 * (times[-1] - times[0]),
+                            this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
+                            'GFP', zorder=3, color=gfp_color,
+                            path_effects=gfp_path_effects)
+                ax.set_ylabel('data (%s)' % ch_unit)
+                # for old matplotlib, we actually need this to have a bounding
+                # box (!), so we have to put some valid text here, change
+                # alpha and path effects later
+                texts.append(ax.text(0, 0, 'blank', zorder=2,
+                                     verticalalignment='baseline',
+                                     horizontalalignment='left',
+                                     fontweight='bold', alpha=0))
             elif plot_type == 'image':
                 im = ax.imshow(D, interpolation='nearest', origin='lower',
                                extent=[times[0], times[-1], 0, D.shape[0]],
                                aspect='auto', cmap=cmap)
-                plt.colorbar(im, ax=ax)
+                cbar = plt.colorbar(im, ax=ax)
+                cbar.ax.set_title(ch_unit)
+                ax.set_ylabel('channels (index)')
+            else:
+                raise ValueError("plot_type has to be 'butterfly' or 'image'."
+                                 "Got %s." % plot_type)
             if xlim is not None:
                 if xlim == 'tight':
                     xlim = (times[0], times[-1])
@@ -138,17 +295,19 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
             ax.set_title(titles[t] + ' (%d channel%s)' % (
                          len(D), 's' if len(D) > 1 else ''))
             ax.set_xlabel('time (ms)')
-            if plot_type == 'butterfly':
-                ax.set_ylabel('data (%s)' % ch_unit)
-            elif plot_type == 'image':
-                ax.set_ylabel('channels (%s)' % ch_unit)
-            else:
-                raise ValueError("plot_type has to be 'butterfly' or 'image'."
-                                 "Got %s." % plot_type)
 
             if (plot_type == 'butterfly') and (hline is not None):
                 for h in hline:
                     ax.axhline(h, color='r', linestyle='--', linewidth=2)
+    if plot_type == 'butterfly':
+        params = dict(axes=axes, texts=texts, lines=lines,
+                      ch_names=evoked.ch_names, idxs=idxs, need_draw=False,
+                      path_effects=path_effects, selectors=selectors)
+        fig.canvas.mpl_connect('pick_event',
+                               partial(_butterfly_onpick, params=params))
+        fig.canvas.mpl_connect('button_press_event',
+                               partial(_butterfly_on_button_press,
+                                       params=params))
 
     if axes_init is None:
         plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
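
The SpanSelector wiring above follows matplotlib's standard widget pattern:
keep a reference to the selector so it is not garbage-collected, and pass a
callback that receives the span limits. A minimal standalone sketch, not
MNE-specific (``rectprops`` matches the matplotlib API of this era)::

    import matplotlib.pyplot as plt
    from matplotlib.widgets import SpanSelector

    fig, ax = plt.subplots()
    ax.plot(range(100))

    def onselect(xmin, xmax):
        # called with the horizontal span limits in data coordinates
        print('selected %.1f - %.1f' % (xmin, xmax))

    # keep a reference, otherwise the selector may be garbage-collected
    selector = SpanSelector(ax, onselect, 'horizontal', minspan=10,
                            useblit=True,
                            rectprops=dict(alpha=0.5, facecolor='red'))
    plt.show()
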
@@ -171,10 +330,13 @@ def _plot_evoked(evoked, picks, exclude, unit, show,
 
 
 def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
-                ylim=None, proj=False, xlim='tight', hline=None, units=None,
-                scalings=None, titles=None, axes=None, plot_type="butterfly"):
+                ylim=None, xlim='tight', proj=False, hline=None, units=None,
+                scalings=None, titles=None, axes=None, gfp=False):
     """Plot evoked data
 
+    Left-clicking a line shows the channel name. Selecting an area by clicking
+    and holding the left mouse button plots a topographic map of the selected
+    area.
+
     Note: If bad channels are not excluded they are shown in red.
 
     Parameters
@@ -189,7 +351,7 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
     unit : bool
         Scale plot with channel (SI) unit.
     show : bool
-        Call pyplot.show() as the end or not.
+        Show figure if True.
     ylim : dict | None
         ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
         Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
@@ -215,15 +377,89 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
         The axes to plot to. If list, the list must be a list of Axes of
         the same length as the number of channel types. If instance of
         Axes, there must be only one channel type plotted.
+    gfp : bool | 'only'
+        Plot GFP in green if True or "only". If "only", then the individual
+        channel traces will not be shown.
     """
     return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
                         show=show, ylim=ylim, proj=proj, xlim=xlim,
                         hline=hline, units=units, scalings=scalings,
-                        titles=titles, axes=axes, plot_type="butterfly")
+                        titles=titles, axes=axes, plot_type="butterfly",
+                        gfp=gfp)
+
+
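
A usage sketch for the new ``gfp`` option, assuming ``Evoked.plot`` forwards
the parameter (the file name is hypothetical)::

    import mne
    evoked = mne.read_evokeds('sample-ave.fif', condition=0)
    evoked.plot(gfp=True)     # butterfly plot with GFP overlaid in green
    evoked.plot(gfp='only')   # GFP trace only, no individual channels
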
+def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
+                     border='none', ylim=None, scalings=None, title=None,
+                     proj=False, vline=[0.0], fig_facecolor='k',
+                     fig_background=None, axis_facecolor='k', font_color='w',
+                     show=True):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale : float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas.
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib border style to be used for each sensor plot.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
+    proj : bool | 'interactive'
+        If True, SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations.
+    """
+    return _plot_evoked_topo(evoked=evoked, layout=layout,
+                             layout_scale=layout_scale, color=color,
+                             border=border, ylim=ylim, scalings=scalings,
+                             title=title, proj=proj, vline=vline,
+                             fig_facecolor=fig_facecolor,
+                             fig_background=fig_background,
+                             axis_facecolor=axis_facecolor,
+                             font_color=font_color, show=show)
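
A rough usage sketch, assuming the function is exported as
``mne.viz.plot_evoked_topo`` and ``evoked`` is already loaded::

    from mne.viz import plot_evoked_topo
    fig = plot_evoked_topo(evoked, title='Auditory response',
                           fig_facecolor='k', font_color='w')
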
 
 
 def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
-                      clim=None, proj=False, xlim='tight', units=None,
+                      clim=None, xlim='tight', proj=False, units=None,
                       scalings=None, titles=None, axes=None, cmap='RdBu_r'):
     """Plot evoked data as images
 
@@ -239,7 +475,7 @@ def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
     unit : bool
         Scale plot with channel (SI) unit.
     show : bool
-        Call pyplot.show() as the end or not.
+        Show figure if True.
     clim : dict | None
         clim for plots. e.g. clim = dict(eeg=[-200e-6, 200e-6])
         Valid keys are eeg, mag, grad, misc. If None, the clim parameter
@@ -290,7 +526,284 @@ def _plot_update_evoked(params, bools):
         idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
         D = this_scaling * new_evoked.data[idx, :]
         if params['plot_type'] == 'butterfly':
-            [line.set_data(times, di) for line, di in zip(ax.lines, D)]
+            for line, di in zip(ax.lines, D):
+                line.set_data(times, di)
         else:
             ax.images[0].set_data(D)
     params['fig'].canvas.draw()
+
+
+def plot_evoked_white(evoked, noise_cov, show=True):
+    """Plot whitened evoked response
+
+    Plots the whitened evoked response and the whitened GFP as described in
+    [1]. If a single covariance object is passed, the GFP panel (bottom)
+    will depict different sensor types. If multiple covariance objects are
+    passed as a list, the left column will display the whitened evoked
+    responses for each channel based on the whitener from the noise covariance
+    that has the highest log-likelihood. The right column will depict the
+    whitened GFPs based on each estimator separately for each sensor type.
+    Instead of the number of channels, the GFP display shows the estimated
+    rank.
+    Note. The rank estimation will be printed by the logger for each noise
+    covariance estimator that is passed.
+
+    Parameters
+    ----------
+    evoked : instance of mne.Evoked
+        The evoked response.
+    noise_cov : list | instance of Covariance | str
+        The noise covariance as computed by ``mne.cov.compute_covariance``.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    References
+    ----------
+    [1] Engemann D. and Gramfort A. (2015). Automated model selection in
+        covariance estimation and spatial whitening of MEG and EEG signals.
+        NeuroImage, vol. 108, 328-342.
+    """
+    return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
+                              scalings=None, rank=None, show=show)
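
A usage sketch, assuming ``epochs`` and ``evoked`` already exist;
``compute_covariance`` with a list of methods and
``return_estimators=True`` yields one covariance per estimator::

    from mne import compute_covariance
    from mne.viz import plot_evoked_white
    noise_covs = compute_covariance(epochs, tmax=0.,
                                    method=['shrunk', 'empirical'],
                                    return_estimators=True)
    plot_evoked_white(evoked, noise_cov=noise_covs)
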
+
+
+def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True):
+    """helper to plot_evoked_white
+
+    Additional Paramter
+    -------------------
+    scalings : dict | None
+        The rescaling method to be applied to improve the accuracy of rank
+        estimation. If dict, it will override the following default values
+        (used if None):
+
+            dict(mag=1e12, grad=1e11, eeg=1e5)
+
+        Note. These values were tested on different datasets across various
+        conditions. You should not need to update them.
+
+    rank : dict of int | None
+        Dict of ints where keys are 'eeg', 'mag' or 'grad'. If None,
+        the rank is detected automatically. Defaults to None. Note.
+        The rank estimation will be printed by the logger for each noise
+        covariance estimator that is passed.
+
+    """
+
+    from ..cov import whiten_evoked, read_cov  # recursive import
+    from ..cov import _estimate_rank_meeg_cov
+    import matplotlib.pyplot as plt
+    if scalings is None:
+        scalings = dict(mag=1e12, grad=1e11, eeg=1e5)
+
+    ch_used = [ch for ch in ['eeg', 'grad', 'mag'] if ch in evoked]
+    has_meg = 'mag' in ch_used and 'grad' in ch_used
+
+    if isinstance(noise_cov, string_types):
+        noise_cov = read_cov(noise_cov)
+    if not isinstance(noise_cov, (list, tuple)):
+        noise_cov = [noise_cov]
+
+    proc_history = evoked.info.get('proc_history', [])
+    has_sss = False
+    if len(proc_history) > 0:
+        # if SSSed, mags and grads are no longer independent
+        # for correct display of the whitening we will drop the cross-terms
+        # (the gradiometer * magnetometer covariance)
+        has_sss = 'max_info' in proc_history[0] and has_meg
+    if has_sss:
+        logger.info('SSS has been applied to data. Showing mag and grad '
+                    'whitening jointly.')
+
+    evoked = evoked.copy()  # handle ref meg
+    evoked.info['projs'] = []  # projs either already applied or unwanted
+
+    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
+                       exclude='bads')
+    evoked.pick_channels([evoked.ch_names[k] for k in picks], copy=False)
+    # important to re-pick; otherwise this crashes on systems with ref
+    # channels as the first sensor block
+    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
+                       exclude='bads')
+
+    picks_list = _picks_by_type(evoked.info, meg_combined=has_sss)
+    if has_meg and has_sss:
+        # reduce ch_used to combined mag grad
+        ch_used = list(zip(*picks_list))[0]
+    # order pick list by ch_used (required for compat with plot_evoked)
+    picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
+    n_ch_used = len(ch_used)
+
+    # make sure we use the same rank estimates for GFP and whitening
+    rank_list = []
+    for cov in noise_cov:
+        rank_ = {}
+        C = cov['data'].copy()
+        picks_list2 = [k for k in picks_list]
+        if rank is None:
+            if has_meg and not has_sss:
+                picks_list2 += _picks_by_type(evoked.info,
+                                              meg_combined=True)
+            for ch_type, this_picks in picks_list2:
+                this_info = pick_info(evoked.info, this_picks)
+                idx = np.ix_(this_picks, this_picks)
+                this_rank = _estimate_rank_meeg_cov(C[idx], this_info,
+                                                    scalings)
+                rank_[ch_type] = this_rank
+        if rank is not None:
+            rank_.update(rank)
+        rank_list.append(rank_)
+    evokeds_white = [whiten_evoked(evoked, n, picks, rank=r)
+                     for n, r in zip(noise_cov, rank_list)]
+
+    axes_evoked = None
+
+    def whitened_gfp(x, rank=None):
+        """Whitened Global Field Power
+
+        The MNE inverse solver assumes zero mean whitened data as input.
+        Therefore, a chi^2 statistic will be best to detect model violations.
+        """
+        return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
+
+    # prepare plot
+    if len(noise_cov) > 1:
+        n_columns = 2
+        n_extra_row = 0
+    else:
+        n_columns = 1
+        n_extra_row = 1
+
+    n_rows = n_ch_used + n_extra_row
+    fig, axes = plt.subplots(n_rows,
+                             n_columns, sharex=True, sharey=False,
+                             figsize=(8.8, 2.2 * n_rows))
+    if n_columns > 1:
+        suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
+                    'and global field power '
+                    '(right, comparison of estimators)' %
+                    noise_cov[0].get('method', 'empirical'))
+        fig.suptitle(suptitle)
+
+    ax_gfp = None
+    if any(((n_columns == 1 and n_ch_used == 1),
+            (n_columns == 1 and n_ch_used > 1),
+            (n_columns == 2 and n_ch_used == 1))):
+        axes_evoked = axes[:n_ch_used]
+        ax_gfp = axes[-1:]
+    elif n_columns == 2 and n_ch_used > 1:
+        axes_evoked = axes[:n_ch_used, 0]
+        ax_gfp = axes[:, 1]
+    else:
+        raise RuntimeError('Wrong axes inputs')
+
+    times = evoked.times * 1e3
+    titles_ = _handle_default('titles')
+    if has_sss:
+        titles_['meg'] = 'MEG (combined)'
+
+    colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
+    ch_colors = {'eeg': 'black', 'mag': 'blue', 'grad': 'cyan',
+                 'meg': 'steelblue'}
+    iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
+
+    if not has_sss:
+        evokeds_white[0].plot(unit=False, axes=axes_evoked,
+                              hline=[-1.96, 1.96], show=False)
+    else:
+        for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
+            ax.plot(times, evokeds_white[0].data[picks].T, color='k')
+            for hline in [-1.96, 1.96]:
+                ax.axhline(hline, color='red', linestyle='--')
+
+    # Now plot the GFP
+    for evoked_white, noise_cov, rank_, color in iter_gfp:
+        i = 0
+        for ch, sub_picks in picks_list:
+            this_rank = rank_[ch]
+            title = '{0} ({2}{1})'.format(
+                    titles_[ch] if n_columns > 1 else ch,
+                    this_rank, 'rank ' if n_columns > 1 else '')
+            label = noise_cov.get('method', 'empirical')
+
+            ax_gfp[i].set_title(title if n_columns > 1 else
+                                'whitened global field power (GFP),'
+                                ' method = "%s"' % label)
+
+            data = evoked_white.data[sub_picks]
+            gfp = whitened_gfp(data, rank=this_rank)
+            ax_gfp[i].plot(times, gfp,
+                           label=(label if n_columns > 1 else title),
+                           color=color if n_columns > 1 else ch_colors[ch])
+            ax_gfp[i].set_xlabel('times [ms]')
+            ax_gfp[i].set_ylabel('GFP [chi^2]')
+            ax_gfp[i].set_xlim(times[0], times[-1])
+            ax_gfp[i].set_ylim(0, 10)
+            ax_gfp[i].axhline(1, color='red', linestyle='--')
+            if n_columns > 1:
+                i += 1
+
+    ax = ax_gfp[0]
+    if n_columns == 1:
+        ax.legend(  # mpl < 1.2.1 compatibility: use prop instead of fontsize
+            loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
+    else:
+        ax.legend(loc='upper right', prop=dict(size=10))
+        params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
+                      bottom=[0.22, 0.13, 0.09][n_rows - 1])
+        if has_sss:
+            params['hspace'] = 0.49
+        fig.subplots_adjust(**params)
+    fig.canvas.draw()
+
+    if show is True:
+        plt.show()
+    return fig
+
+
+def plot_snr_estimate(evoked, inv, show=True):
+    """Plot a data SNR estimate
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        The evoked instance. This should probably be baseline-corrected.
+    inv : instance of InverseOperator
+        The minimum-norm inverse operator.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    import matplotlib.pyplot as plt
+    from ..minimum_norm import estimate_snr
+    snr, snr_est = estimate_snr(evoked, inv, verbose=True)
+    fig, ax = plt.subplots(1, 1)
+    lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
+    ax.plot([0, 0], lims[2:], 'k:')
+    ax.plot(lims[:2], [0, 0], 'k:')
+    # Colors are "bluish green" and "vermillion" taken from:
+    #  http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
+    ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])
+    ax.plot(evoked.times, snr, color=[0.8, 0.4, 0.0])
+    ax.set_xlim(lims[:2])
+    ax.set_ylim(lims[2:])
+    ax.set_ylabel('SNR')
+    ax.set_xlabel('Time (sec)')
+    if evoked.comment is not None:
+        ax.set_title(evoked.comment)
+    plt.draw()
+    if show:
+        plt.show()
+    return fig
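
For example (the inverse operator file name is hypothetical)::

    from mne.minimum_norm import read_inverse_operator
    from mne.viz import plot_snr_estimate
    inv = read_inverse_operator('sample-inv.fif')
    plot_snr_estimate(evoked, inv)
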
diff --git a/mne/viz/ica.py b/mne/viz/ica.py
index 302072f..122fd7c 100644
--- a/mne/viz/ica.py
+++ b/mne/viz/ica.py
@@ -4,6 +4,7 @@ from __future__ import print_function
 
 # Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
 #
 # License: Simplified BSD
 
@@ -11,8 +12,17 @@ from functools import partial
 
 import numpy as np
 
-from ..utils import deprecated
-from .utils import tight_layout, _prepare_trellis
+from .utils import tight_layout, _prepare_trellis, _select_bads
+from .utils import _layout_figure, _plot_raw_onscroll, _mouse_click
+from .utils import _helper_raw_resize, _plot_raw_onkey
+from .raw import _prepare_mne_browse_raw, _plot_raw_traces
+from .epochs import _prepare_mne_browse_epochs
+from .evoked import _butterfly_on_button_press, _butterfly_onpick
+from .topomap import _prepare_topo_plot, plot_topomap
+from ..utils import logger
+from ..defaults import _handle_default
+from ..io.meas_info import create_info
+from ..io.pick import pick_types
 
 
 def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
@@ -39,47 +49,8 @@ def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
         raise err
 
 
-@deprecated('`plot_ica_panel` is deprecated and will be removed in '
-            'MNE 1.0. Use `plot_ica_sources` instead')
-def plot_ica_panel(sources, start=None, stop=None,
-                   source_idx=None, ncol=3, verbose=None,
-                   title=None, show=True):
-    """Create panel plots of ICA sources
-
-    Clicking on the plot of an individual source opens a new figure showing
-    the source.
-
-    Parameters
-    ----------
-    sources : ndarray
-        Sources as drawn from ica.get_sources.
-    start : int
-        x-axis start index. If None from the beginning.
-    stop : int
-        x-axis stop index. If None to the end.
-    source_idx : array-like
-        Indices for subsetting the sources.
-    ncol : int
-        Number of panel-columns.
-    title : str
-        The figure title. If None a default is provided.
-    verbose : bool, str, int, or None
-        If not None, override default verbose level (see mne.verbose).
-    show : bool
-        If True, plot will be shown, else just the figure is returned.
-
-    Returns
-    -------
-    fig : instance of pyplot.Figure
-    """
-
-    return _plot_ica_grid(sources=sources, start=start, stop=stop,
-                          source_idx=source_idx, ncol=ncol, verbose=verbose,
-                          title=title, show=show)
-
-
 def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
-                     stop=None, show=True, title=None):
+                     stop=None, show=True, title=None, block=False):
     """Plot estimated latent sources given the unmixing matrix.
 
     Typical use cases:
@@ -95,25 +66,38 @@ def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
         The ICA solution.
     inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
         The object to plot the sources from.
-    picks : ndarray | None.
+    picks : int | array_like of int | None.
         The components to be displayed. If None, plot will show the
         sources in the order as fitted.
-    start : int
-        X-axis start index. If None from the beginning.
-    stop : int
-        X-axis stop index. If None to the end.
     exclude : array_like of int
         The components marked for exclusion. If None (default), ICA.exclude
         will be used.
+    start : int
+        X-axis start index. If None, from the beginning.
+    stop : int
+        X-axis stop index. If None, the next 20 are shown; in case of evoked
+        data, all samples to the end are shown.
+    show : bool
+        Show figure if True.
     title : str | None
         The figure title. If None a default is provided.
-    show : bool
-        If True, plot will be shown, else just the figure is returned.
+    block : bool
+        Whether to halt program execution until the figure is closed.
+        Useful for interactive selection of components in raw and epoch
+        plotter. For evoked, this parameter has no effect. Defaults to False.
 
     Returns
     -------
     fig : instance of pyplot.Figure
         The figure.
+
+    Notes
+    -----
+    For raw and epoch instances, it is possible to select components for
+    exclusion by clicking on the line. The selected components are added to
+    ``ica.exclude`` on close.
+
+    .. versionadded:: 0.10.0
     """
 
     from ..io.base import _BaseRaw
@@ -122,31 +106,24 @@ def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
 
     if exclude is None:
         exclude = ica.exclude
-
-    if isinstance(inst, (_BaseRaw, _BaseEpochs)):
-        if isinstance(inst, _BaseRaw):
-            sources = ica._transform_raw(inst, start, stop)
-        else:
-            if start is not None or stop is not None:
-                inst = inst.crop(start, stop, copy=True)
-            sources = ica._transform_epochs(inst, concatenate=True)
-        if picks is not None:
-            if np.isscalar(picks):
-                picks = [picks]
-            sources = np.atleast_2d(sources[picks])
-
-        fig = _plot_ica_grid(sources, start=start, stop=stop,
-                             ncol=len(sources) // 10 or 1,
-                             exclude=exclude,
-                             source_idx=picks,
-                             title=title, show=show)
+    elif len(ica.exclude) > 0:
+        exclude = np.union1d(ica.exclude, exclude)
+    if isinstance(inst, _BaseRaw):
+        fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
+                                stop=stop, show=show, title=title,
+                                block=block)
+    elif isinstance(inst, _BaseEpochs):
+        fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
+                                   stop=stop, show=show, title=title,
+                                   block=block)
     elif isinstance(inst, Evoked):
         sources = ica.get_sources(inst)
         if start is not None or stop is not None:
             inst = inst.crop(start, stop, copy=True)
         fig = _plot_ica_sources_evoked(evoked=sources,
+                                       picks=picks,
                                        exclude=exclude,
-                                       title=title)
+                                       title=title, show=show)
     else:
         raise ValueError('Data input must be of Raw, Epochs or Evoked type')
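
A sketch of the interactive exclusion workflow this dispatch enables
(``raw`` is a hypothetical Raw object; ``ICA.plot_sources`` is assumed to
forward ``block``)::

    from mne.preprocessing import ICA
    ica = ICA(n_components=20).fit(raw)
    ica.plot_sources(raw, block=True)  # click traces to mark for exclusion
    print(ica.exclude)                 # populated when the window is closed
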
 
@@ -178,7 +155,7 @@ def _plot_ica_grid(sources, start, stop,
     title : str
         The figure title. If None a default is provided.
     show : bool
-        If True, plot will be shown, else just the figure is returned.
+        If True, all open plots will be shown.
     """
     import matplotlib.pyplot as plt
 
@@ -224,44 +201,89 @@ def _plot_ica_grid(sources, start, stop,
     return fig
 
 
-def _plot_ica_sources_evoked(evoked, exclude, title):
+def _plot_ica_sources_evoked(evoked, picks, exclude, title, show):
     """Plot average over epochs in ICA space
 
     Parameters
     ----------
-    ica : instance of mne.prerocessing.ICA
-        The ICA object.
-    epochs : instance of mne.Epochs
-        The Epochs to be regarded.
+    evoked : instance of mne.Evoked
+        The Evoked to be used.
+    picks : int | array_like of int | None.
+        The components to be displayed. If None, plot will show the
+        sources in the order as fitted.
+    exclude : array_like of int
+        The components marked for exclusion. If None (default), ICA.exclude
+        will be used.
     title : str
         The figure title.
+    show : bool
+        Show figure if True.
     """
     import matplotlib.pyplot as plt
     if title is None:
         title = 'Reconstructed latent sources, time-locked'
 
-    fig = plt.figure()
+    fig, axes = plt.subplots(1)
+    ax = axes
+    axes = [axes]
+    idxs = [0]
     times = evoked.times * 1e3
 
-    # plot unclassified sources
-    plt.plot(times, evoked.data.T, 'k')
-    for ii in exclude:
-        # use indexing to expose event related sources
-        color, label = ('r', 'ICA %02d' % ii)
-        plt.plot(times, evoked.data[ii].T, color='r', label=label)
-
-    plt.title(title)
-    plt.xlim(times[[0, -1]])
-    plt.xlabel('Time (ms)')
-    plt.ylabel('(NA)')
-    plt.legend(loc='best')
+    # plot unclassified sources and label excluded ones
+    lines = list()
+    texts = list()
+    if picks is None:
+        picks = np.arange(evoked.data.shape[0])
+    idxs = [picks]
+    for ii in picks:
+        if ii in exclude:
+            label = 'ICA %03d' % (ii + 1)
+            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
+                         zorder=1, color='r', label=label))
+        else:
+            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
+                                 color='k', zorder=0))
+
+    ax.set_title(title)
+    ax.set_xlim(times[[0, -1]])
+    ax.set_xlabel('Time (ms)')
+    ax.set_ylabel('(NA)')
+    if len(exclude) > 0:
+        plt.legend(loc='best')
     tight_layout(fig=fig)
+
+    # for old matplotlib, we actually need this to have a bounding
+    # box (!), so we have to put some valid text here, change
+    # alpha and path effects later
+    texts.append(ax.text(0, 0, 'blank', zorder=2,
+                         verticalalignment='baseline',
+                         horizontalalignment='left',
+                         fontweight='bold', alpha=0))
+    # wrap in a list of lists so each subplot has its own group of lines
+    lines = [lines]
+    ch_names = evoked.ch_names
+
+    from matplotlib import patheffects
+    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
+                                           alpha=0.75)]
+    params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
+                  ch_names=ch_names, need_draw=False,
+                  path_effects=path_effects)
+    fig.canvas.mpl_connect('pick_event',
+                           partial(_butterfly_onpick, params=params))
+    fig.canvas.mpl_connect('button_press_event',
+                           partial(_butterfly_on_button_press,
+                                   params=params))
+    if show:
+        plt.show()
+
     return fig
 
 
 def plot_ica_scores(ica, scores, exclude=None, axhline=None,
                     title='ICA component scores',
-                    figsize=(12, 6)):
+                    figsize=(12, 6), show=True):
     """Plot scores related to detected components.
 
     Use this function to assess how well your score describes outlier
@@ -281,7 +303,9 @@ def plot_ica_scores(ica, scores, exclude=None, axhline=None,
     title : str
         The figure title.
     figsize : tuple of int
-        The figure size. Defaults to (12, 6)
+        The figure size. Defaults to (12, 6).
+    show : bool
+        Show figure if True.
 
     Returns
     -------
@@ -305,7 +329,7 @@ def plot_ica_scores(ica, scores, exclude=None, axhline=None,
     plt.suptitle(title)
     for this_scores, ax in zip(scores, axes):
         if len(my_range) != len(this_scores):
-            raise ValueError('The length ofr `scores` must equal the '
+            raise ValueError('The length of `scores` must equal the '
                              'number of ICA components.')
         ax.bar(my_range, this_scores, color='w')
         for excl in exclude:
@@ -318,10 +342,13 @@ def plot_ica_scores(ica, scores, exclude=None, axhline=None,
         ax.set_ylabel('score')
         ax.set_xlabel('ICA components')
         ax.set_xlim(0, len(this_scores))
-    plt.show()
+
     tight_layout(fig=fig)
     if len(axes) > 1:
         plt.subplots_adjust(top=0.9)
+
+    if show:
+        plt.show()
     return fig
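
For instance, scores from EOG-based detection could be visualized as follows
(a sketch; ``raw`` and ``ica`` as in the example above)::

    eog_inds, scores = ica.find_bads_eog(raw)
    ica.plot_scores(scores, exclude=eog_inds, axhline=0.5)
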
 
 
@@ -329,10 +356,12 @@ def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
                      stop=None, title=None, show=True):
     """Overlay of raw and cleaned signals given the unmixing matrix.
 
-    This method helps visualizing signal quality and arficat rejection.
+    This method helps visualize signal quality and artifact rejection.
 
     Parameters
     ----------
+    ica : instance of mne.preprocessing.ICA
+        The ICA object.
     inst : instance of mne.io.Raw or mne.Evoked
         The signals to be compared given the ICA solution. If Raw input,
         the raw data are displayed before and after cleaning. In a second
@@ -352,20 +381,21 @@ def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
         X-axis stop index. If None to the end.
     title : str
         The figure title.
+    show : bool
+        Show figure if True.
 
     Returns
     -------
-        fig : instance of pyplot.Figure
+    fig : instance of pyplot.Figure
         The figure.
     """
     # avoid circular imports
     from ..io.base import _BaseRaw
     from ..evoked import Evoked
     from ..preprocessing.ica import _check_start_stop
-    import matplotlib.pyplot as plt
 
     if not isinstance(inst, (_BaseRaw, Evoked)):
-        raise ValueError('Data input must be of Raw or Epochs type')
+        raise ValueError('Data input must be of Raw or Evoked type')
     if title is None:
         title = 'Signals before (red) and after (black) cleaning'
     if picks is None:
@@ -386,7 +416,7 @@ def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
         data_cln, _ = raw_cln[picks, start_compare:stop_compare]
         fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
                                     times=times * 1e3, title=title,
-                                    ch_types_used=ch_types_used)
+                                    ch_types_used=ch_types_used, show=show)
     elif isinstance(inst, Evoked):
         if start is not None and stop is not None:
             inst = inst.crop(start, stop, copy=True)
@@ -394,13 +424,12 @@ def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
             inst.pick_channels([inst.ch_names[p] for p in picks])
         evoked_cln = ica.apply(inst, exclude=exclude, copy=True)
         fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
-                                       title=title)
-    if show is True:
-        plt.show()
+                                       title=title, show=show)
+
     return fig
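
A quick sanity-check sketch, using the same hypothetical objects as above::

    ica.plot_overlay(raw, exclude=eog_inds)  # red = before, black = after
    ica.plot_overlay(evoked)                 # same comparison on an Evoked
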
 
 
-def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used):
+def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
     """Plot evoked after and before ICA cleaning
 
     Parameters
@@ -409,13 +438,15 @@ def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used):
         The data before cleaning.
     data_cln : ndarray
         The data after cleaning.
+    show : bool
+        Show figure if True.
 
     Returns
     -------
     fig : instance of pyplot.Figure
     """
     import matplotlib.pyplot as plt
-        # Restore sensor space data and keep all PCA components
+    # Restore sensor space data and keep all PCA components
     # let's now compare the data before and after cleaning.
     # first the raw data
     assert data.shape == data_cln.shape
@@ -432,20 +463,24 @@ def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used):
                  'grad': 'Gradiometers',
                  'eeg': 'EEG'}
     ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
-    ax2.set_title('Average across channels ({})'.format(ch_types))
+    ax2.set_title('Average across channels ({0})'.format(ch_types))
     ax2.plot(times, data.mean(0), color='r')
     ax2.plot(times, data_cln.mean(0), color='k')
     ax2.set_xlim(100, 106)
     ax2.set_xlabel('time (ms)')
     ax2.set_xlim(times[0], times[-1])
     tight_layout(fig=fig)
+
     fig.subplots_adjust(top=0.90)
     fig.canvas.draw()
 
+    if show:
+        plt.show()
+
     return fig
 
 
-def _plot_ica_overlay_evoked(evoked, evoked_cln, title):
+def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
     """Plot evoked after and before ICA cleaning
 
     Parameters
@@ -454,6 +489,8 @@ def _plot_ica_overlay_evoked(evoked, evoked_cln, title):
         The evoked data before cleaning.
     evoked_cln : instance of mne.Evoked
         The evoked data after cleaning.
+    show : bool
+        If True, all open plots will be shown.
 
     Returns
     -------
@@ -473,12 +510,252 @@ def _plot_ica_overlay_evoked(evoked, evoked_cln, title):
     fig.suptitle('Average signal before (red) and after (black) ICA')
     axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
 
-    evoked.plot(axes=axes)
+    evoked.plot(axes=axes, show=show)
     for ax in fig.axes:
-        [l.set_color('r') for l in ax.get_lines()]
+        for l in ax.get_lines():
+            l.set_color('r')
     fig.canvas.draw()
-    evoked_cln.plot(axes=axes)
+    evoked_cln.plot(axes=axes, show=show)
     tight_layout(fig=fig)
+
     fig.subplots_adjust(top=0.90)
     fig.canvas.draw()
+
+    if show:
+        plt.show()
+
     return fig
+
+
+def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
+                      block):
+    """Function for plotting the ICA components as raw array."""
+    import matplotlib.pyplot as plt
+    color = _handle_default('color', (0., 0., 0.))
+    orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
+    if picks is None:
+        picks = range(len(orig_data))
+    types = ['misc' for _ in picks]
+    picks = list(sorted(picks))
+    eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
+    ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
+    data = [orig_data[pick] for pick in picks]
+    c_names = ['ICA %03d' % x for x in range(len(orig_data))]
+    for eog_idx in eog_chs:
+        c_names.append(raw.ch_names[eog_idx])
+        types.append('eog')
+    for ecg_idx in ecg_chs:
+        c_names.append(raw.ch_names[ecg_idx])
+        types.append('ecg')
+    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
+    if len(extra_picks) > 0:
+        eog_ecg_data, _ = raw[extra_picks, :]
+        for idx in range(len(eog_ecg_data)):
+            if idx < len(eog_chs):
+                eog_ecg_data[idx] /= 150e-6  # scaling for eog
+            else:
+                eog_ecg_data[idx] /= 5e-4  # scaling for ecg
+        data = np.append(data, eog_ecg_data, axis=0)
+
+    for idx in range(len(extra_picks)):
+        picks = np.append(picks, ica.n_components_ + idx)
+    if title is None:
+        title = 'ICA components'
+    info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
+
+    info['bads'] = [c_names[x] for x in exclude]
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = start + 20
+        stop = min(stop, raw.times[-1])
+    duration = stop - start
+    if duration <= 0:
+        raise RuntimeError('Stop must be larger than start.')
+    t_end = int(duration * raw.info['sfreq'])
+    times = raw.times[0:t_end]
+    bad_color = (1., 0., 0.)
+    inds = list(range(len(picks)))
+    data = np.array(data)
+    n_channels = min([20, len(picks)])
+    params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end],
+                  ch_start=0, t_start=start, info=info, duration=duration,
+                  ica=ica, n_channels=n_channels, times=times, types=types,
+                  n_times=raw.n_times, bad_color=bad_color, picks=picks)
+    _prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
+                            n_channels)
+    params['scale_factor'] = 1.0
+    params['plot_fun'] = partial(_plot_raw_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color)
+    params['update_fun'] = partial(_update_data, params)
+    params['pick_bads_fun'] = partial(_pick_bads, params=params)
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    _layout_figure(params)
+    # callbacks
+    callback_key = partial(_plot_raw_onkey, params=params)
+    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
+    callback_scroll = partial(_plot_raw_onscroll, params=params)
+    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
+    callback_pick = partial(_mouse_click, params=params)
+    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_raw_resize, params=params)
+    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
+    callback_close = partial(_close_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    params['fig_proj'] = None
+    params['event_times'] = None
+    params['update_fun']()
+    params['plot_fun']()
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
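
The callback wiring above is the usual matplotlib idiom: ``functools.partial``
binds a shared ``params`` dict so every canvas event handler mutates the same
state. A minimal, MNE-independent sketch of the pattern::

    from functools import partial
    import matplotlib.pyplot as plt

    def on_key(event, params):
        params['presses'] += 1  # shared mutable state across callbacks
        print(event.key, params['presses'])

    params = dict(presses=0)
    fig = plt.figure()
    fig.canvas.mpl_connect('key_press_event', partial(on_key, params=params))
    plt.show()
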
+
+
+def _update_data(params):
+    """Function for preparing the data on horizontal shift of the viewport."""
+    sfreq = params['info']['sfreq']
+    start = int(params['t_start'] * sfreq)
+    end = int((params['t_start'] + params['duration']) * sfreq)
+    params['data'] = params['orig_data'][:, start:end]
+    params['times'] = params['raw'].times[start:end]
+
+
+def _pick_bads(event, params):
+    """Function for selecting components on click."""
+    bads = params['info']['bads']
+    params['info']['bads'] = _select_bads(event, params, bads)
+    params['update_fun']()
+    params['plot_fun']()
+
+
+def _close_event(events, params):
+    """Function for excluding the selected components on close."""
+    info = params['info']
+    c_names = ['ICA %03d' % x for x in range(params['ica'].n_components_)]
+    exclude = [c_names.index(x) for x in info['bads'] if x.startswith('ICA')]
+    params['ica'].exclude = exclude
+
+
+def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
+                         title, block):
+    """Function for plotting the components as epochs."""
+    import matplotlib.pyplot as plt
+    data = ica._transform_epochs(epochs, concatenate=True)
+    eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
+    ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
+    c_names = ['ICA %03d' % x for x in range(ica.n_components_)]
+    ch_types = np.repeat('misc', ica.n_components_)
+    for eog_idx in eog_chs:
+        c_names.append(epochs.ch_names[eog_idx])
+        ch_types = np.append(ch_types, 'eog')
+    for ecg_idx in ecg_chs:
+        c_names.append(epochs.ch_names[ecg_idx])
+        ch_types = np.append(ch_types, 'ecg')
+    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
+    if len(extra_picks) > 0:
+        eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
+                                      axis=1)
+        data = np.append(data, eog_ecg_data, axis=0)
+    scalings = _handle_default('scalings_plot_raw')
+    scalings['misc'] = 5.0
+    info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
+                       ch_types=ch_types)
+    info['projs'] = list()
+    info['bads'] = [c_names[x] for x in exclude]
+    if title is None:
+        title = 'ICA components'
+    if picks is None:
+        picks = list(range(ica.n_components_))
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = start + 20
+        stop = min(stop, len(epochs.events))
+    for idx in range(len(extra_picks)):
+        picks = np.append(picks, ica.n_components_ + idx)
+    n_epochs = stop - start
+    if n_epochs <= 0:
+        raise RuntimeError('Stop must be larger than start.')
+    params = {'ica': ica,
+              'epochs': epochs,
+              'info': info,
+              'orig_data': data,
+              'bads': list(),
+              'bad_color': (1., 0., 0.),
+              't_start': start * len(epochs.times)}
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    _prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
+                               n_epochs=n_epochs, scalings=scalings,
+                               title=title, picks=picks,
+                               order=['misc', 'eog', 'ecg'])
+    params['hsel_patch'].set_x(params['t_start'])
+    callback_close = partial(_close_epochs_event, params=params)
+    params['fig'].canvas.mpl_connect('close_event', callback_close)
+    if show:
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
+
+    return params['fig']
+
+
+def _close_epochs_event(events, params):
+    """Function for excluding the selected components on close."""
+    info = params['info']
+    exclude = [info['ch_names'].index(x) for x in info['bads']
+               if x.startswith('ICA')]
+    params['ica'].exclude = exclude
+
+
+def _label_clicked(pos, params):
+    """Function for plotting independent components on click to label."""
+    import matplotlib.pyplot as plt
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
+    if line_idx >= len(params['picks']):
+        return
+    ic_idx = [params['picks'][line_idx]]
+    types = list()
+    info = params['ica'].info
+    if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
+        types.append('eeg')
+    if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
+        types.append('mag')
+    if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
+        types.append('grad')
+
+    ica = params['ica']
+    data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
+                  ica.pca_components_[:ica.n_components_])
+    data = np.atleast_2d(data)
+    fig, axes = _prepare_trellis(len(types), max_col=3)
+    for ch_idx, ch_type in enumerate(types):
+        try:
+            data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
+                                                                    ch_type,
+                                                                    None)
+        except Exception as exc:
+            logger.warning(exc)
+            plt.close(fig)
+            return
+        this_data = data[:, data_picks]
+        ax = axes[ch_idx]
+        if merge_grads:
+            from ..channels.layout import _merge_grad_data
+        for ii, data_ in zip(ic_idx, this_data):
+            ax.set_title('IC #%03d ' % ii + ch_type, fontsize=12)
+            data_ = _merge_grad_data(data_) if merge_grads else data_
+            plot_topomap(data_.flatten(), pos, axis=ax, show=False)
+            ax.set_yticks([])
+            ax.set_xticks([])
+            ax.set_frame_on(False)
+    tight_layout(fig=fig)
+    fig.subplots_adjust(top=0.95)
+    fig.canvas.draw()
+
+    plt.show()
diff --git a/mne/viz/misc.py b/mne/viz/misc.py
index 71f1fad..abcff98 100644
--- a/mne/viz/misc.py
+++ b/mne/viz/misc.py
@@ -45,11 +45,11 @@ def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
         Show colorbar or not.
     proj : bool
         Apply projections or not.
-    show : bool
-        Call pyplot.show() as the end or not.
     show_svd : bool
-        Plot also singular values of the noise covariance for each sensor type.
-        We show square roots ie. standard deviations.
+        Also plot the singular values of the noise covariance for each sensor
+        type. We show square roots, i.e. standard deviations.
+    show : bool
+        Show figure if True.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
 
@@ -62,7 +62,7 @@ def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
     """
     if exclude == 'bads':
         exclude = info['bads']
-    ch_names = [n for n in cov.ch_names if not n in exclude]
+    ch_names = [n for n in cov.ch_names if n not in exclude]
     ch_idx = [cov.ch_names.index(n) for n in ch_names]
     info_ch_names = info['ch_names']
     sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
@@ -107,7 +107,7 @@ def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
     fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
     for k, (idx, name, _, _) in enumerate(idx_names):
         plt.subplot(1, len(idx_names), k + 1)
-        plt.imshow(C[idx][:, idx], interpolation="nearest")
+        plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
         plt.title(name)
     plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
     tight_layout(fig=fig_cov)
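
A usage sketch for ``plot_cov`` (``noise_cov`` and ``raw`` are hypothetical,
e.g. from ``compute_covariance`` and a loaded Raw object)::

    from mne.viz import plot_cov
    plot_cov(noise_cov, raw.info, exclude='bads', show_svd=True)
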
@@ -176,13 +176,13 @@ def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
     freq_bounds = sorted(set(np.ravel(freq_bins)))
     freq_ticks = copy.deepcopy(freq_bounds)
 
-    # Rejecting time points that will not be plotted
+    # Reject time points that will not be plotted and gather results
+    source_power = []
     for stc in stcs:
-        # Using 1e-10 to improve numerical stability
-        stc.crop(tmin - 1e-10, tmax - stc.tstep + 1e-10)
-
-    # Gathering results for each time window
-    source_power = np.array([stc.data for stc in stcs])
+        stc = stc.copy()  # copy since crop modifies inplace
+        stc.crop(tmin, tmax - stc.tstep)
+        source_power.append(stc.data)
+    source_power = np.array(source_power)
 
     # Finding the source with maximum source power
     if source_index is None:
@@ -205,7 +205,7 @@ def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
     # Plotting the results
     fig = plt.figure(figsize=(9, 6))
     plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
-               cmap=plt.cm.jet)
+               cmap='Reds')
     ax = plt.gca()
 
     plt.title('Time-frequency source power')
@@ -254,11 +254,11 @@ def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
         The filenames for the BEM surfaces in the format
         ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
     orientation : str
-        'coronal' or 'transverse' or 'sagittal'
+        'coronal' or 'axial' or 'sagittal'
     slices : list of int
         Slice indices.
     show : bool
-        Call pyplot.show() at the end.
+        Show figure if True.
 
     Returns
     -------
@@ -330,9 +330,9 @@ def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
                               surf['tris'], surf['rr'][:, 0],
                               levels=[sl], colors='yellow', linewidths=2.0)
 
+    plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
+                        hspace=0.)
     if show:
-        plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
-                            hspace=0.)
         plt.show()
 
     return fig
@@ -350,11 +350,11 @@ def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
         Path to the SUBJECTS_DIR. If None, the path is obtained by using
         the environment variable SUBJECTS_DIR.
     orientation : str
-        'coronal' or 'transverse' or 'sagittal'.
+        'coronal' or 'axial' or 'sagittal'.
     slices : list of int
         Slice indices.
     show : bool
-        Call pyplot.show() at the end.
+        Show figure if True.
 
     Returns
     -------
@@ -377,14 +377,14 @@ def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
     surf_fnames = []
     for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
         surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
-        if len(surf_name) > 0:
+        if len(surf_fname) > 0:
             surf_fname = surf_fname[0]
             logger.info("Using surface: %s" % surf_fname)
-        else:
-            raise IOError('No surface found for %s.' % surf_name)
-        if not op.isfile(surf_fname):
-            raise IOError('Surface file "%s" does not exist' % surf_fname)
-        surf_fnames.append(surf_fname)
+            surf_fnames.append(surf_fname)
+
+    if len(surf_fnames) == 0:
+        raise IOError('No surface files found. Surface files must end with '
+                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')
 
     # Plot the contours
     return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
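
For the standard tutorial subject this would be called roughly as
(``SUBJECTS_DIR`` is assumed to be set in the environment)::

    from mne.viz import plot_bem
    plot_bem(subject='sample', orientation='coronal',
             slices=[50, 100, 150, 200])
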
@@ -420,12 +420,16 @@ def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
     equal_spacing : bool
         Use equal spacing between events in y-axis.
     show : bool
-        Call pyplot.show() at the end.
+        Show figure if True.
 
     Returns
     -------
     fig : matplotlib.figure.Figure
         The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
 
     if sfreq is None:
@@ -490,7 +494,9 @@ def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
         ev_mask = events[:, 2] == ev
         kwargs = {}
         if event_id is not None:
-            kwargs['label'] = event_id_rev[ev]
+            event_label = '{0} ({1})'.format(event_id_rev[ev],
+                                             np.sum(ev_mask))
+            kwargs['label'] = event_label
         if ev in color:
             kwargs['color'] = color[ev]
         if equal_spacing:
@@ -512,10 +518,63 @@ def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
 
     ax.grid('on')
 
+    fig = fig if fig is not None else plt.gcf()
     if event_id is not None:
-        ax.legend()
-
+        box = ax.get_position()
+        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
+        fig.canvas.draw()
     if show:
         plt.show()
 
-    return fig if fig is not None else plt.gcf()
+    return fig
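
With event_id given, the legend entries above now read "name (count)". A
short sketch; the events file and event codes are illustrative only::

    import mne

    events = mne.read_events('sample-eve.fif')  # hypothetical events file
    fig = mne.viz.plot_events(events, sfreq=600.,
                              event_id=dict(auditory=1, visual=3))
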
+
+
+def _get_presser(fig):
+    """Helper to get our press callback"""
+    callbacks = fig.canvas.callbacks.callbacks['button_press_event']
+    func = None
+    for key, val in callbacks.items():
+        if val.func.__class__.__name__ == 'partial':
+            func = val.func
+            break
+    assert func is not None
+    return func
+
+
+def plot_dipole_amplitudes(dipoles, colors=None, show=True):
+    """Plot the amplitude traces of a set of dipoles
+
+    Parameters
+    ----------
+    dipoles : list of instance of Dipole
+        The dipoles whose amplitudes should be shown.
+    colors : list of colors | None
+        Color to plot with each dipole. If None default colors are used.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The figure object containing the plot.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+    """
+    import matplotlib.pyplot as plt
+    if colors is None:
+        colors = cycle(COLORS)
+    fig, ax = plt.subplots(1, 1)
+    xlim = [np.inf, -np.inf]
+    for dip, color in zip(dipoles, colors):
+        ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5)
+        xlim[0] = min(xlim[0], dip.times[0])
+        xlim[1] = max(xlim[1], dip.times[-1])
+    ax.set_xlim(xlim)
+    ax.set_xlabel('Time (sec)')
+    ax.set_ylabel('Amplitude (nAm)')
+    if show:
+        fig.show()
+    return fig
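
A minimal sketch of the new plot_dipole_amplitudes, assuming a dipole file
is at hand (the filename is hypothetical)::

    import mne

    dip = mne.read_dipole('sample_set1.dip')  # returns a Dipole instance
    fig = mne.viz.plot_dipole_amplitudes([dip], colors=['k'], show=True)
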
diff --git a/mne/viz/montage.py b/mne/viz/montage.py
new file mode 100644
index 0000000..184029a
--- /dev/null
+++ b/mne/viz/montage.py
@@ -0,0 +1,58 @@
+"""Functions to plot EEG sensor montages or digitizer montages
+"""
+import numpy as np
+
+
+def plot_montage(montage, scale_factor=1.5, show_names=False, show=True):
+    """Plot a montage
+
+    Parameters
+    ----------
+    montage : instance of Montage
+        The montage to visualize.
+    scale_factor : float
+        Determines the size of the points. Defaults to 1.5.
+    show_names : bool
+        Whether to show the channel names. Defaults to False.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+        The figure object.
+    """
+    from ..channels.montage import Montage, DigMontage
+
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.mplot3d import Axes3D  # noqa
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')
+
+    if isinstance(montage, Montage):
+        pos = montage.pos
+        ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
+        if show_names:
+            ch_names = montage.ch_names
+            for ch_name, x, y, z in zip(ch_names, pos[:, 0],
+                                        pos[:, 1], pos[:, 2]):
+                ax.text(x, y, z, ch_name)
+    elif isinstance(montage, DigMontage):
+        pos = np.vstack((montage.hsp, montage.elp))
+        ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2])
+        if show_names:
+            if montage.point_names:
+                hpi_names = montage.point_names
+                for hpi_name, x, y, z in zip(hpi_names, montage.elp[:, 0],
+                                             montage.elp[:, 1],
+                                             montage.elp[:, 2]):
+                    ax.text(x, y, z, hpi_name)
+
+    ax.set_xlabel('x')
+    ax.set_ylabel('y')
+    ax.set_zlabel('z')
+
+    if show:
+        plt.show()
+
+    return fig
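
A hedged sketch of the new plot_montage; the montage name is just an
example, any montage readable by mne.channels.read_montage should do::

    from mne.channels import read_montage
    from mne.viz import plot_montage

    montage = read_montage('standard_1020')  # built-in EEG montage
    fig = plot_montage(montage, show_names=True)
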
diff --git a/mne/viz/raw.py b/mne/viz/raw.py
index b9ad6b7..a5a3934 100644
--- a/mne/viz/raw.py
+++ b/mne/viz/raw.py
@@ -3,6 +3,7 @@
 from __future__ import print_function
 
 # Authors: Eric Larson <larson.eric.d@gmail.com>
+#          Jaakko Leppakangas <jaeilepp@student.jyu.fi>
 #
 # License: Simplified BSD
 
@@ -14,264 +15,72 @@ import numpy as np
 from ..externals.six import string_types
 from ..io.pick import pick_types
 from ..io.proj import setup_proj
-from ..utils import set_config, get_config, verbose
+from ..utils import verbose, get_config
 from ..time_frequency import compute_raw_psd
-from .utils import figure_nobar, _toggle_options
-from .utils import _mutable_defaults, _toggle_proj, tight_layout
+from .utils import _toggle_options, _toggle_proj, tight_layout
+from .utils import _layout_figure, _plot_raw_onkey, figure_nobar
+from .utils import _plot_raw_onscroll, _mouse_click
+from .utils import _helper_raw_resize, _select_bads, _onclick_help
+from ..defaults import _handle_default
 
 
 def _plot_update_raw_proj(params, bools):
     """Helper only needs to be called when proj is changed"""
-    inds = np.where(bools)[0]
-    params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
-                               for ii in inds]
-    params['proj_bools'] = bools
+    if bools is not None:
+        inds = np.where(bools)[0]
+        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
+                                   for ii in inds]
+        params['proj_bools'] = bools
     params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
                                         verbose=False)
-    _update_raw_data(params)
+    params['update_fun']()
     params['plot_fun']()
 
 
 def _update_raw_data(params):
     """Helper only needs to be called when time or proj is changed"""
+    from scipy.signal import filtfilt
     start = params['t_start']
     stop = params['raw'].time_as_index(start + params['duration'])[0]
     start = params['raw'].time_as_index(start)[0]
+    data_picks = pick_types(params['raw'].info, meg=True, eeg=True)
     data, times = params['raw'][:, start:stop]
     if params['projector'] is not None:
         data = np.dot(params['projector'], data)
     # remove DC
     if params['remove_dc'] is True:
         data -= np.mean(data, axis=1)[:, np.newaxis]
+    if params['ba'] is not None:
+        data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],
+                                    data[data_picks], axis=1, padlen=0)
     # scale
     for di in range(data.shape[0]):
         data[di] /= params['scalings'][params['types'][di]]
         # stim channels should be hard limited
         if params['types'][di] == 'stim':
             data[di] = np.minimum(data[di], 1.0)
+    # clip
+    if params['clipping'] == 'transparent':
+        data[np.logical_or(data > 1, data < -1)] = np.nan
+    elif params['clipping'] == 'clamp':
+        data = np.clip(data, -1, 1, data)
     params['data'] = data
     params['times'] = times
 
 
-def _layout_raw(params):
-    """Set raw figure layout"""
-    s = params['fig'].get_size_inches()
-    scroll_width = 0.33
-    hscroll_dist = 0.33
-    vscroll_dist = 0.1
-    l_border = 1.2
-    r_border = 0.1
-    t_border = 0.33
-    b_border = 0.5
-
-    # only bother trying to reset layout if it's reasonable to do so
-    if s[0] < 2 * scroll_width or s[1] < 2 * scroll_width + hscroll_dist:
-        return
-
-    # convert to relative units
-    scroll_width_x = scroll_width / s[0]
-    scroll_width_y = scroll_width / s[1]
-    vscroll_dist /= s[0]
-    hscroll_dist /= s[1]
-    l_border /= s[0]
-    r_border /= s[0]
-    t_border /= s[1]
-    b_border /= s[1]
-    # main axis (traces)
-    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
-    ax_y = hscroll_dist + scroll_width_y + b_border
-    ax_height = 1.0 - ax_y - t_border
-    params['ax'].set_position([l_border, ax_y, ax_width, ax_height])
-    # vscroll (channels)
-    pos = [ax_width + l_border + vscroll_dist, ax_y,
-           scroll_width_x, ax_height]
-    params['ax_vscroll'].set_position(pos)
-    # hscroll (time)
-    pos = [l_border, b_border, ax_width, scroll_width_y]
-    params['ax_hscroll'].set_position(pos)
-    # options button
-    pos = [l_border + ax_width + vscroll_dist, b_border,
-           scroll_width_x, scroll_width_y]
-    params['ax_button'].set_position(pos)
-    params['fig'].canvas.draw()
-
-
-def _helper_resize(event, params):
-    """Helper for resizing"""
-    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
-    set_config('MNE_BROWSE_RAW_SIZE', size)
-    _layout_raw(params)
-
-
 def _pick_bad_channels(event, params):
     """Helper for selecting / dropping bad channels onpick"""
+    # Both bad lists are updated. params['info'] used for colors.
     bads = params['raw'].info['bads']
-    # trade-off, avoid selecting more than one channel when drifts are present
-    # however for clean data don't click on peaks but on flat segments
-    f = lambda x, y: y(np.mean(x), x.std() * 2)
-    for l in event.inaxes.lines:
-        ydata = l.get_ydata()
-        if not isinstance(ydata, list) and not np.isnan(ydata).any():
-            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
-            if ymin <= event.ydata <= ymax:
-                this_chan = vars(l)['ch_name']
-                if this_chan in params['raw'].ch_names:
-                    if this_chan not in bads:
-                        bads.append(this_chan)
-                        l.set_color(params['bad_color'])
-                    else:
-                        bads.pop(bads.index(this_chan))
-                        l.set_color(vars(l)['def-color'])
-                event.canvas.draw()
-                break
-    # update deep-copied info to persistently draw bads
-    params['info']['bads'] = bads
-
-
-def _mouse_click(event, params):
-    """Vertical select callback"""
-    if event.inaxes is None or event.button != 1:
-        return
-    plot_fun = params['plot_fun']
-    # vertical scrollbar changed
-    if event.inaxes == params['ax_vscroll']:
-        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
-        if params['ch_start'] != ch_start:
-            params['ch_start'] = ch_start
-            plot_fun()
-    # horizontal scrollbar changed
-    elif event.inaxes == params['ax_hscroll']:
-        _plot_raw_time(event.xdata - params['duration'] / 2, params)
-
-    elif event.inaxes == params['ax']:
-        _pick_bad_channels(event, params)
-
-
-def _plot_raw_time(value, params):
-    """Deal with changed time value"""
-    info = params['info']
-    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
-    if value > max_times:
-        value = params['n_times'] / info['sfreq'] - params['duration']
-    if value < 0:
-        value = 0
-    if params['t_start'] != value:
-        params['t_start'] = value
-        params['hsel_patch'].set_x(value)
-        _update_raw_data(params)
-        params['plot_fun']()
-
-
-def _plot_raw_onkey(event, params):
-    """Interpret key presses"""
-    import matplotlib.pyplot as plt
-    # check for initial plot
-    plot_fun = params['plot_fun']
-    if event is None:
-        plot_fun()
-        return
-
-    # quit event
-    if event.key == 'escape':
-        plt.close(params['fig'])
-        return
-
-    # change plotting params
-    ch_changed = False
-    if event.key == 'down':
-        params['ch_start'] += params['n_channels']
-        ch_changed = True
-    elif event.key == 'up':
-        params['ch_start'] -= params['n_channels']
-        ch_changed = True
-    elif event.key == 'right':
-        _plot_raw_time(params['t_start'] + params['duration'], params)
-        return
-    elif event.key == 'left':
-        _plot_raw_time(params['t_start'] - params['duration'], params)
-        return
-    elif event.key in ['o', 'p']:
-        _toggle_options(None, params)
-        return
-
-    # deal with plotting changes
-    if ch_changed is True:
-        if params['ch_start'] >= len(params['info']['ch_names']):
-            params['ch_start'] = 0
-        elif params['ch_start'] < 0:
-            # wrap to end
-            rem = len(params['info']['ch_names']) % params['n_channels']
-            params['ch_start'] = len(params['info']['ch_names'])
-            params['ch_start'] -= rem if rem != 0 else params['n_channels']
-
-    if ch_changed:
-        plot_fun()
-
-
-def _plot_traces(params, inds, color, bad_color, lines, event_line, offsets):
-    """Helper for plotting raw"""
-
-    info = params['info']
-    n_channels = params['n_channels']
-    params['bad_color'] = bad_color
-    # do the plotting
-    tick_list = []
-    for ii in range(n_channels):
-        ch_ind = ii + params['ch_start']
-        # let's be generous here and allow users to pass
-        # n_channels per view >= the number of traces available
-        if ii >= len(lines):
-            break
-        elif ch_ind < len(info['ch_names']):
-            # scale to fit
-            ch_name = info['ch_names'][inds[ch_ind]]
-            tick_list += [ch_name]
-            offset = offsets[ii]
-
-            # do NOT operate in-place lest this get screwed up
-            this_data = params['data'][inds[ch_ind]]
-            this_color = bad_color if ch_name in info['bads'] else color
-            if isinstance(this_color, dict):
-                this_color = this_color[params['types'][inds[ch_ind]]]
-
-            # subtraction here gets corect orientation for flipped ylim
-            lines[ii].set_ydata(offset - this_data)
-            lines[ii].set_xdata(params['times'])
-            lines[ii].set_color(this_color)
-            vars(lines[ii])['ch_name'] = ch_name
-            vars(lines[ii])['def-color'] = color[params['types'][inds[ch_ind]]]
-        else:
-            # "remove" lines
-            lines[ii].set_xdata([])
-            lines[ii].set_ydata([])
-    # deal with event lines
-    if params['events'] is not None:
-        t = params['events']
-        t = t[np.where(np.logical_and(t >= params['times'][0],
-                       t <= params['times'][-1]))[0]]
-        if len(t) > 0:
-            xs = list()
-            ys = list()
-            for tt in t:
-                xs += [tt, tt, np.nan]
-                ys += [0, 2 * n_channels + 1, np.nan]
-            event_line.set_xdata(xs)
-            event_line.set_ydata(ys)
-        else:
-            event_line.set_xdata([])
-            event_line.set_ydata([])
-    # finalize plot
-    params['ax'].set_xlim(params['times'][0],
-                          params['times'][0] + params['duration'], False)
-    params['ax'].set_yticklabels(tick_list)
-    params['vsel_patch'].set_y(params['ch_start'])
-    params['fig'].canvas.draw()
+    params['info']['bads'] = _select_bads(event, params, bads)
+    _plot_update_raw_proj(params, None)
 
 
 def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
              bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
              event_color='cyan', scalings=None, remove_dc=True, order='type',
-             show_options=False, title=None, show=True, block=False):
+             show_options=False, title=None, show=True, block=False,
+             highpass=None, lowpass=None, filtorder=4, clipping=None):
     """Plot raw data
 
     Parameters
@@ -281,7 +90,8 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
     events : array | None
         Events to show with vertical bars.
     duration : float
-        Time window (sec) to plot in a given time.
+        Time window (sec) to plot. The lesser of this value and the duration
+        of the raw file will be used.
     start : float
         Initial time to show (can be changed dynamically once plotted).
     n_channels : int
@@ -289,17 +99,25 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
     bgcolor : color object
         Color of the background.
     color : dict | color object | None
-        Color for the data traces. If None, defaults to:
-        `dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
-             ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
+        Color for the data traces. If None, defaults to::
+
+            dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
+                 emg='k', ref_meg='steelblue', misc='k', stim='k',
+                 resp='k', chpi='k')
+
     bad_color : color object
         Color to make bad channels.
-    event_color : color object
-        Color to use for events.
+    event_color : color object | dict
+        Color to use for events. Can also be a dict with
+        ``{event_number: color}`` pairings. Use ``event_number==-1`` for
+        any event numbers in the events list that are not in the dictionary.
     scalings : dict | None
-        Scale factors for the traces. If None, defaults to:
-        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
-             ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
+        Scale factors for the traces. If None, defaults to::
+
+            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
+                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
+                 resp=1, chpi=1e-4)
+
     remove_dc : bool
         If True remove DC component when plotting data.
     order : 'type' | 'original' | array
@@ -307,15 +125,32 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
         'original' plots in the order of ch_names, array gives the
         indices to use in plotting.
     show_options : bool
-        If True, a dialog for options related to projecion is shown.
+        If True, a dialog for options related to projection is shown.
     title : str | None
         The title of the window. If None, either the filename of the
         raw object or '<unknown>' will be displayed as title.
     show : bool
-        Show figure if True
+        Show figure if True.
     block : bool
         Whether to halt program execution until the figure is closed.
         Useful for setting bad channels on the fly by clicking on a line.
+        May not work on all systems / platforms.
+    highpass : float | None
+        Highpass to apply when displaying data.
+    lowpass : float | None
+        Lowpass to apply when displaying data.
+    filtorder : int
+        Filtering order. Note that for efficiency and simplicity,
+        filtering during plotting uses forward-backward IIR filtering,
+        so the effective filter order will be twice ``filtorder``.
+        Filtering the lines for display may also produce some edge
+        artifacts (at the left and right edges) of the signals
+        during display. Filtering requires scipy >= 0.10.
+    clipping : str | None
+        If None, channels are allowed to exceed their designated bounds in
+        the plot. If "clamp", then values are clamped to the appropriate
+        range for display, creating step-like artifacts. If "transparent",
+        then excessive values are not shown, creating gaps in the traces.
 
     Returns
     -------
@@ -326,15 +161,45 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
     -----
     The arrow keys (up/down/left/right) can typically be used to navigate
     between channels and time ranges, but this depends on the backend
-    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
-    To mark or un-mark a channel as bad, click on the rather flat segments
-    of a channel's time series. The changes will be reflected immediately
-    in the raw object's ``raw.info['bads']`` entry.
+    matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The
+    scaling can be adjusted with - and + (or =) keys. The viewport dimensions
+    can be adjusted with page up/page down and home/end keys. Full screen mode
+    can be toggled with the f11 key. To mark or un-mark a channel as bad,
+    click
+    on the rather flat segments of a channel's time series. The changes will be
+    reflected immediately in the raw object's ``raw.info['bads']`` entry.
     """
     import matplotlib.pyplot as plt
     import matplotlib as mpl
-    color, scalings = _mutable_defaults(('color', color),
-                                        ('scalings_plot_raw', scalings))
+    from scipy.signal import butter
+    color = _handle_default('color', color)
+    scalings = _handle_default('scalings_plot_raw', scalings)
+
+    if clipping is not None and clipping not in ('clamp', 'transparent'):
+        raise ValueError('clipping must be None, "clamp", or "transparent", '
+                         'not %s' % clipping)
+    # figure out the IIR filtering parameters
+    nyq = raw.info['sfreq'] / 2.
+    if highpass is None and lowpass is None:
+        ba = None
+    else:
+        filtorder = int(filtorder)
+        if filtorder <= 0:
+            raise ValueError('filtorder (%s) must be >= 1' % filtorder)
+        if highpass is not None and highpass <= 0:
+            raise ValueError('highpass must be > 0, not %s' % highpass)
+        if lowpass is not None and lowpass >= nyq:
+            raise ValueError('lowpass must be < nyquist (%s), not %s'
+                             % (nyq, lowpass))
+        if highpass is None:
+            ba = butter(filtorder, lowpass / nyq, 'lowpass', analog=False)
+        elif lowpass is None:
+            ba = butter(filtorder, highpass / nyq, 'highpass', analog=False)
+        else:
+            if lowpass <= highpass:
+                raise ValueError('lowpass (%s) must be > highpass (%s)'
+                                 % (lowpass, highpass))
+            ba = butter(filtorder, [highpass / nyq, lowpass / nyq], 'bandpass',
+                        analog=False)
 
     # make a copy of info, remove projection (for now)
     info = copy.deepcopy(raw.info)
@@ -356,8 +221,11 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
     elif not isinstance(title, string_types):
         raise TypeError('title must be None or a string')
     if events is not None:
-        events = events[:, 0].astype(float) - raw.first_samp
-        events /= info['sfreq']
+        event_times = events[:, 0].astype(float) - raw.first_samp
+        event_times /= info['sfreq']
+        event_nums = events[:, 2]
+    else:
+        event_times = event_nums = None
 
     # reorganize the data in plotting order
     inds = list()
@@ -393,80 +261,56 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
         # put back to original order first, then use new order
         inds = inds[reord][order]
 
+    if not isinstance(event_color, dict):
+        event_color = {-1: event_color}
+    else:
+        event_color = copy.deepcopy(event_color)  # we might modify it
+    for key in event_color:
+        if not isinstance(key, int):
+            raise TypeError('event_color key "%s" was a %s not an int'
+                            % (key, type(key)))
+        if key <= 0 and key != -1:
+            raise KeyError('only key <= 0 allowed is -1 (cannot use %s)'
+                           % key)
+
     # set up projection and data parameters
+    duration = min(raw.times[-1], float(duration))
     params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
-                  info=info, projs=projs, remove_dc=remove_dc,
+                  info=info, projs=projs, remove_dc=remove_dc, ba=ba,
                   n_channels=n_channels, scalings=scalings, types=types,
-                  n_times=n_times, events=events)
+                  n_times=n_times, event_times=event_times,
+                  event_nums=event_nums, clipping=clipping, fig_proj=None)
 
-    # set up plotting
-    size = get_config('MNE_BROWSE_RAW_SIZE')
-    if size is not None:
-        size = size.split(',')
-        size = tuple([float(s) for s in size])
-        # have to try/catch when there's no toolbar
-    fig = figure_nobar(facecolor=bgcolor, figsize=size)
-    fig.canvas.set_window_title('mne_browse_raw')
-    ax = plt.subplot2grid((10, 10), (0, 0), colspan=9, rowspan=9)
-    ax.set_title(title, fontsize=12)
-    ax_hscroll = plt.subplot2grid((10, 10), (9, 0), colspan=9)
-    ax_hscroll.get_yaxis().set_visible(False)
-    ax_hscroll.set_xlabel('Time (s)')
-    ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
-    ax_vscroll.set_axis_off()
-    ax_button = plt.subplot2grid((10, 10), (9, 9))
-    # store these so they can be fixed on resize
-    params['fig'] = fig
-    params['ax'] = ax
-    params['ax_hscroll'] = ax_hscroll
-    params['ax_vscroll'] = ax_vscroll
-    params['ax_button'] = ax_button
+    _prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
+                            n_channels)
 
-    # populate vertical and horizontal scrollbars
-    for ci in range(len(info['ch_names'])):
-        this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
-                      else color)
-        if isinstance(this_color, dict):
-            this_color = this_color[types[inds[ci]]]
-        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
-                                                   facecolor=this_color,
-                                                   edgecolor=this_color))
-    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
-                                       facecolor='w', edgecolor='w')
-    ax_vscroll.add_patch(vsel_patch)
-    params['vsel_patch'] = vsel_patch
-    hsel_patch = mpl.patches.Rectangle((start, 0), duration, 1, color='k',
-                                       edgecolor=None, alpha=0.5)
-    ax_hscroll.add_patch(hsel_patch)
-    params['hsel_patch'] = hsel_patch
-    ax_hscroll.set_xlim(0, n_times / float(info['sfreq']))
-    n_ch = len(info['ch_names'])
-    ax_vscroll.set_ylim(n_ch, 0)
-    ax_vscroll.set_title('Ch.')
-
-    # make shells for plotting traces
-    offsets = np.arange(n_channels) * 2 + 1
-    ax.set_yticks(offsets)
-    ax.set_ylim([n_channels * 2 + 1, 0])
     # plot event_line first so it's in the back
-    event_line = ax.plot([np.nan], color=event_color)[0]
-    lines = [ax.plot([np.nan])[0] for _ in range(n_ch)]
-    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
-
-    params['plot_fun'] = partial(_plot_traces, params=params, inds=inds,
-                                 color=color, bad_color=bad_color, lines=lines,
-                                 event_line=event_line, offsets=offsets)
-
+    event_lines = [params['ax'].plot([np.nan], color=event_color[ev_num])[0]
+                   for ev_num in sorted(event_color.keys())]
+    params['plot_fun'] = partial(_plot_raw_traces, params=params, inds=inds,
+                                 color=color, bad_color=bad_color,
+                                 event_lines=event_lines,
+                                 event_color=event_color)
+    params['update_fun'] = partial(_update_raw_data, params=params)
+    params['pick_bads_fun'] = partial(_pick_bad_channels, params=params)
+    params['label_click_fun'] = partial(_label_clicked, params=params)
+    params['scale_factor'] = 1.0
     # set up callbacks
-    opt_button = mpl.widgets.Button(ax_button, 'Opt')
-    callback_option = partial(_toggle_options, params=params)
-    opt_button.on_clicked(callback_option)
+    opt_button = None
+    if len(raw.info['projs']) > 0 and not raw.proj:
+        ax_button = plt.subplot2grid((10, 10), (9, 9))
+        params['ax_button'] = ax_button
+        opt_button = mpl.widgets.Button(ax_button, 'Proj')
+        callback_option = partial(_toggle_options, params=params)
+        opt_button.on_clicked(callback_option)
     callback_key = partial(_plot_raw_onkey, params=params)
-    fig.canvas.mpl_connect('key_press_event', callback_key)
+    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
+    callback_scroll = partial(_plot_raw_onscroll, params=params)
+    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
     callback_pick = partial(_mouse_click, params=params)
-    fig.canvas.mpl_connect('button_press_event', callback_pick)
-    callback_resize = partial(_helper_resize, params=params)
-    fig.canvas.mpl_connect('resize_event', callback_resize)
+    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
+    callback_resize = partial(_helper_raw_resize, params=params)
+    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
 
     # As here code is shared with plot_evoked, some extra steps:
     # first the actual plot update function
@@ -481,23 +325,94 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
 
     # do initial plots
     callback_proj('none')
-    _layout_raw(params)
+    _layout_figure(params)
 
     # deal with projectors
-    params['fig_opts'] = None
     if show_options is True:
         _toggle_options(None, params)
 
     if show:
-        plt.show(block=block)
+        try:
+            plt.show(block=block)
+        except TypeError:  # not all versions have this
+            plt.show()
 
-    return fig
+    return params['fig']
+
+
+def _label_clicked(pos, params):
+    """Helper function for selecting bad channels."""
+    labels = params['ax'].yaxis.get_ticklabels()
+    offsets = np.array(params['offsets']) + params['offsets'][0]
+    line_idx = np.searchsorted(offsets, pos[1])
+    text = labels[line_idx].get_text()
+    if len(text) == 0:
+        return
+    ch_idx = params['ch_start'] + line_idx
+    bads = params['info']['bads']
+    if text in bads:
+        while text in bads:  # to make sure duplicates are removed
+            bads.remove(text)
+        color = vars(params['lines'][line_idx])['def_color']
+        params['ax_vscroll'].patches[ch_idx].set_color(color)
+    else:
+        bads.append(text)
+        color = params['bad_color']
+        params['ax_vscroll'].patches[ch_idx].set_color(color)
+    params['raw'].info['bads'] = bads
+    _plot_update_raw_proj(params, None)
+
+
+def _set_psd_plot_params(info, proj, picks, ax, area_mode):
+    """Aux function"""
+    import matplotlib.pyplot as plt
+    if area_mode not in [None, 'std', 'range']:
+        raise ValueError('"area_mode" must be "std", "range", or None')
+    if picks is None:
+        if ax is not None:
+            raise ValueError('If "ax" is not supplied (None), then "picks" '
+                             'must also be supplied')
+        megs = ['mag', 'grad', False]
+        eegs = [False, False, True]
+        names = ['Magnetometers', 'Gradiometers', 'EEG']
+        picks_list = list()
+        titles_list = list()
+        for meg, eeg, name in zip(megs, eegs, names):
+            picks = pick_types(info, meg=meg, eeg=eeg, ref_meg=False)
+            if len(picks) > 0:
+                picks_list.append(picks)
+                titles_list.append(name)
+        if len(picks_list) == 0:
+            raise RuntimeError('No MEG or EEG channels found')
+    else:
+        picks_list = [picks]
+        titles_list = ['Selected channels']
+        ax_list = [ax]
+
+    make_label = False
+    fig = None
+    if ax is None:
+        fig = plt.figure()
+        ax_list = list()
+        for ii in range(len(picks_list)):
+            # Make x-axes change together
+            if ii > 0:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
+                                           sharex=ax_list[0]))
+            else:
+                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
+        make_label = True
+    else:
+        fig = ax_list[0].get_figure()
+
+    return fig, picks_list, titles_list, ax_list, make_label
 
 
 @verbose
-def plot_raw_psds(raw, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
-                  proj=False, n_fft=2048, picks=None, ax=None, color='black',
-                  area_mode='std', area_alpha=0.33, n_jobs=1, verbose=None):
+def plot_raw_psd(raw, tmin=0., tmax=np.inf, fmin=0, fmax=np.inf, proj=False,
+                 n_fft=2048, picks=None, ax=None, color='black',
+                 area_mode='std', area_alpha=0.33,
+                 n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
     """Plot the power spectral density across channels
 
     Parameters
@@ -531,59 +446,40 @@ def plot_raw_psds(raw, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
         If None, no area will be plotted.
     area_alpha : float
         Alpha for the area.
+    n_overlap : int
+        The number of points of overlap between blocks. The default value
+        is 0 (no overlap).
+    dB : bool
+        If True, transform data to decibels.
+    show : bool
+        Show figure if True.
     n_jobs : int
         Number of jobs to run in parallel.
     verbose : bool, str, int, or None
         If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure with frequency spectra of the data channels.
     """
     import matplotlib.pyplot as plt
-    if area_mode not in [None, 'std', 'range']:
-        raise ValueError('"area_mode" must be "std", "range", or None')
-    if picks is None:
-        if ax is not None:
-            raise ValueError('If "ax" is not supplied (None), then "picks" '
-                             'must also be supplied')
-        megs = ['mag', 'grad', False]
-        eegs = [False, False, True]
-        names = ['Magnetometers', 'Gradiometers', 'EEG']
-        picks_list = list()
-        titles_list = list()
-        for meg, eeg, name in zip(megs, eegs, names):
-            picks = pick_types(raw.info, meg=meg, eeg=eeg, ref_meg=False)
-            if len(picks) > 0:
-                picks_list.append(picks)
-                titles_list.append(name)
-        if len(picks_list) == 0:
-            raise RuntimeError('No MEG or EEG channels found')
-    else:
-        picks_list = [picks]
-        titles_list = ['Selected channels']
-        ax_list = [ax]
-
-    make_label = False
-    fig = None
-    if ax is None:
-        fig = plt.figure()
-        ax_list = list()
-        for ii in range(len(picks_list)):
-            # Make x-axes change together
-            if ii > 0:
-                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
-                                           sharex=ax_list[0]))
-            else:
-                ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
-        make_label = True
-    else:
-        fig = ax_list[0].get_figure()
+    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
+        raw.info, proj, picks, ax, area_mode)
 
     for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
                                                 ax_list)):
         psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
-                                      fmin=fmin, fmax=fmax, n_fft=n_fft,
-                                      n_jobs=n_jobs, plot=False, proj=proj)
+                                      fmin=fmin, fmax=fmax, proj=proj,
+                                      n_fft=n_fft, n_overlap=n_overlap,
+                                      n_jobs=n_jobs, verbose=None)
 
         # Convert PSDs to dB
-        psds = 10 * np.log10(psds)
+        if dB:
+            psds = 10 * np.log10(psds)
+            unit = 'dB'
+        else:
+            unit = 'power'
         psd_mean = np.mean(psds, axis=0)
         if area_mode == 'std':
             psd_std = np.std(psds, axis=0)
@@ -600,11 +496,177 @@ def plot_raw_psds(raw, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
         if make_label:
             if ii == len(picks_list) - 1:
                 ax.set_xlabel('Freq (Hz)')
-            if ii == len(picks_list) / 2:
-                ax.set_ylabel('Power Spectral Density (dB/Hz)')
+            if ii == len(picks_list) // 2:
+                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
             ax.set_title(title)
             ax.set_xlim(freqs[0], freqs[-1])
     if make_label:
         tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
-    plt.show()
+    if show is True:
+        plt.show()
     return fig
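
A usage sketch of the renamed plot_raw_psd with the new n_overlap, dB and
show arguments; the raw file path is a placeholder::

    import mne
    from mne.viz import plot_raw_psd

    raw = mne.io.Raw('sample_raw.fif', preload=True)  # hypothetical file
    fig = plot_raw_psd(raw, fmin=1., fmax=80., n_fft=2048, n_overlap=1024,
                       dB=True, show=False)
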
+
+
+def _prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
+                            n_channels):
+    """Helper for setting up the mne_browse_raw window."""
+    import matplotlib.pyplot as plt
+    import matplotlib as mpl
+    size = get_config('MNE_BROWSE_RAW_SIZE')
+    if size is not None:
+        size = size.split(',')
+        size = tuple([float(s) for s in size])
+
+    fig = figure_nobar(facecolor=bgcolor, figsize=size)
+    fig.canvas.set_window_title('mne_browse_raw')
+    ax = plt.subplot2grid((10, 10), (0, 1), colspan=8, rowspan=9)
+    ax.set_title(title, fontsize=12)
+    ax_hscroll = plt.subplot2grid((10, 10), (9, 1), colspan=8)
+    ax_hscroll.get_yaxis().set_visible(False)
+    ax_hscroll.set_xlabel('Time (s)')
+    ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
+    ax_vscroll.set_axis_off()
+    ax_help_button = plt.subplot2grid((10, 10), (0, 0), colspan=1)
+    help_button = mpl.widgets.Button(ax_help_button, 'Help')
+    help_button.on_clicked(partial(_onclick_help, params=params))
+    # store these so they can be fixed on resize
+    params['fig'] = fig
+    params['ax'] = ax
+    params['ax_hscroll'] = ax_hscroll
+    params['ax_vscroll'] = ax_vscroll
+    params['ax_help_button'] = ax_help_button
+    params['help_button'] = help_button
+
+    # populate vertical and horizontal scrollbars
+    info = params['info']
+    for ci in range(len(info['ch_names'])):
+        this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
+                      else color)
+        if isinstance(this_color, dict):
+            this_color = this_color[params['types'][inds[ci]]]
+        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
+                                                   facecolor=this_color,
+                                                   edgecolor=this_color))
+    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
+                                       facecolor='w', edgecolor='w')
+    ax_vscroll.add_patch(vsel_patch)
+    params['vsel_patch'] = vsel_patch
+    hsel_patch = mpl.patches.Rectangle((params['t_start'], 0),
+                                       params['duration'], 1, edgecolor='k',
+                                       facecolor=(0.75, 0.75, 0.75),
+                                       alpha=0.25, linewidth=1, clip_on=False)
+    ax_hscroll.add_patch(hsel_patch)
+    params['hsel_patch'] = hsel_patch
+    ax_hscroll.set_xlim(0, params['n_times'] / float(info['sfreq']))
+    n_ch = len(info['ch_names'])
+    ax_vscroll.set_ylim(n_ch, 0)
+    ax_vscroll.set_title('Ch.')
+
+    # make shells for plotting traces
+    ylim = [n_channels * 2 + 1, 0]
+    offset = ylim[0] / n_channels
+    offsets = np.arange(n_channels) * offset + (offset / 2.)
+    ax.set_yticks(offsets)
+    ax.set_ylim(ylim)
+    ax.set_xlim(params['t_start'], params['t_start'] + params['duration'],
+                False)
+
+    params['offsets'] = offsets
+    params['lines'] = [ax.plot([np.nan], antialiased=False, linewidth=0.5)[0]
+                       for _ in range(n_ch)]
+    ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
+    vertline_color = (0., 0.75, 0.)
+    params['ax_vertline'] = ax.plot([0, 0], ylim, color=vertline_color,
+                                    zorder=-1)[0]
+    params['ax_vertline'].ch_name = ''
+    params['vertline_t'] = ax_hscroll.text(0, 1, '', color=vertline_color,
+                                           va='bottom', ha='right')
+    params['ax_hscroll_vertline'] = ax_hscroll.plot([0, 0], [0, 1],
+                                                    color=vertline_color,
+                                                    zorder=1)[0]
+
+
+def _plot_raw_traces(params, inds, color, bad_color, event_lines=None,
+                     event_color=None):
+    """Helper for plotting raw"""
+    lines = params['lines']
+    info = params['info']
+    n_channels = params['n_channels']
+    params['bad_color'] = bad_color
+    labels = params['ax'].yaxis.get_ticklabels()
+    # do the plotting
+    tick_list = list()
+    for ii in range(n_channels):
+        ch_ind = ii + params['ch_start']
+        # let's be generous here and allow users to pass
+        # n_channels per view >= the number of traces available
+        if ii >= len(lines):
+            break
+        elif ch_ind < len(info['ch_names']):
+            # scale to fit
+            ch_name = info['ch_names'][inds[ch_ind]]
+            tick_list += [ch_name]
+            offset = params['offsets'][ii]
+
+            # do NOT operate in-place lest this get screwed up
+            this_data = params['data'][inds[ch_ind]] * params['scale_factor']
+            this_color = bad_color if ch_name in info['bads'] else color
+            this_z = -1 if ch_name in info['bads'] else 0
+            if isinstance(this_color, dict):
+                this_color = this_color[params['types'][inds[ch_ind]]]
+
+            # subtraction here gets correct orientation for flipped ylim
+            lines[ii].set_ydata(offset - this_data)
+            lines[ii].set_xdata(params['times'])
+            lines[ii].set_color(this_color)
+            lines[ii].set_zorder(this_z)
+            vars(lines[ii])['ch_name'] = ch_name
+            vars(lines[ii])['def_color'] = color[params['types'][inds[ch_ind]]]
+
+            # set label color
+            this_color = bad_color if ch_name in info['bads'] else 'black'
+            labels[ii].set_color(this_color)
+        else:
+            # "remove" lines
+            lines[ii].set_xdata([])
+            lines[ii].set_ydata([])
+    # deal with event lines
+    if params['event_times'] is not None:
+        # find events in the time window
+        event_times = params['event_times']
+        mask = np.logical_and(event_times >= params['times'][0],
+                              event_times <= params['times'][-1])
+        event_times = event_times[mask]
+        event_nums = params['event_nums'][mask]
+        # plot them with appropriate colors
+        # go through the list backward so we end with -1, the catchall
+        used = np.zeros(len(event_times), bool)
+        ylim = params['ax'].get_ylim()
+        for ev_num, line in zip(sorted(event_color.keys())[::-1],
+                                event_lines[::-1]):
+            mask = (event_nums == ev_num) if ev_num >= 0 else ~used
+            assert not np.any(used[mask])
+            used[mask] = True
+            t = event_times[mask]
+            if len(t) > 0:
+                xs = list()
+                ys = list()
+                for tt in t:
+                    xs += [tt, tt, np.nan]
+                    ys += [0, ylim[0], np.nan]
+                line.set_xdata(xs)
+                line.set_ydata(ys)
+            else:
+                line.set_xdata([])
+                line.set_ydata([])
+    # finalize plot
+    params['ax'].set_xlim(params['times'][0],
+                          params['times'][0] + params['duration'], False)
+    params['ax'].set_yticklabels(tick_list)
+    params['vsel_patch'].set_y(params['ch_start'])
+    params['fig'].canvas.draw()
+    # XXX This is a hack to make sure this figure gets drawn last
+    # so that when matplotlib goes to calculate bounds we don't get a
+    # CGContextRef error on the MacOSX backend :(
+    if params['fig_proj'] is not None:
+        params['fig_proj'].canvas.draw()
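
Taken together, the new plot_raw arguments cover display-only filtering,
trace clipping and per-event colors. A sketch under the assumption that a
raw file with stim events is at hand; the path and event codes are
illustrative::

    import mne

    raw = mne.io.Raw('sample_raw.fif', preload=True)  # hypothetical file
    events = mne.find_events(raw)
    fig = mne.viz.plot_raw(raw, events=events, duration=20.,
                           highpass=1., lowpass=40., filtorder=4,
                           clipping='clamp',
                           event_color={1: 'r', -1: 'k'})  # -1 = catchall
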
diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py
index e39f410..7baa32a 100644
--- a/mne/viz/tests/test_3d.py
+++ b/mne/viz/tests/test_3d.py
@@ -3,6 +3,7 @@
 #          Martin Luessi <mluessi@nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d@gmail.com>
 #          Mainak Jas <mainak@neuro.hut.fi>
+#          Mark Wronkiewicz <wronk.mark@gmail.com>
 #
 # License: Simplified BSD
 
@@ -10,64 +11,60 @@ import os.path as op
 import warnings
 
 import numpy as np
-from numpy.testing import assert_raises
+from numpy.testing import assert_raises, assert_equal
 
-from mne import SourceEstimate
-from mne import make_field_map, pick_channels_evoked, read_evokeds
+from mne import (make_field_map, pick_channels_evoked, read_evokeds,
+                 read_trans, read_dipole, SourceEstimate)
 from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
-                     plot_trans, mne_analyze_colormap)
-from mne.datasets import sample
+                     plot_trans)
+from mne.utils import requires_mayavi, requires_pysurfer, run_tests_if_main
+from mne.datasets import testing
 from mne.source_space import read_source_spaces
 
-data_dir = sample.data_path(download=False)
-base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
-evoked_fname = op.join(base_dir, 'test-ave.fif')
-subjects_dir = op.join(data_dir, 'subjects')
-
-lacks_mayavi = False
-try:
-    from mayavi import mlab
-except ImportError:
-    try:
-        from enthought.mayavi import mlab
-    except ImportError:
-        lacks_mayavi = True
-requires_mayavi = np.testing.dec.skipif(lacks_mayavi, 'Requires mayavi')
-
-if not lacks_mayavi:
-    mlab.options.backend = 'test'
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
 
+data_dir = testing.data_path(download=False)
+subjects_dir = op.join(data_dir, 'subjects')
+trans_fname = op.join(data_dir, 'MEG', 'sample',
+                      'sample_audvis_trunc-trans.fif')
+src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
+                    'sample-oct-6-src.fif')
+dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
+
+base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+evoked_fname = op.join(base_dir, 'test-ave.fif')
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
 
-@sample.requires_sample_data
+@testing.requires_testing_data
+@requires_pysurfer
 @requires_mayavi
 def test_plot_sparse_source_estimates():
     """Test plotting of (sparse) source estimates
     """
-    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
-                                            'bem', 'sample-oct-6-src.fif'))
+    sample_src = read_source_spaces(src_fname)
 
     # dense version
     vertices = [s['vertno'] for s in sample_src]
     n_time = 5
     n_verts = sum(len(v) for v in vertices)
     stc_data = np.zeros((n_verts * n_time))
-    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
+    stc_size = stc_data.size
+    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
+        np.random.rand(stc_data.size // 20)
     stc_data.shape = (n_verts, n_time)
     stc = SourceEstimate(stc_data, vertices, 1, 1)
-    colormap = mne_analyze_colormap(format='matplotlib')
-    # don't really need to test matplotlib method since it's not used now...
-    colormap = mne_analyze_colormap()
+    colormap = 'mne_analyze'
     plot_source_estimates(stc, 'sample', colormap=colormap,
                           config_opts={'background': (1, 1, 0)},
-                          subjects_dir=subjects_dir, colorbar=True)
+                          subjects_dir=subjects_dir, colorbar=True,
+                          clim='auto')
     assert_raises(TypeError, plot_source_estimates, stc, 'sample',
-                  figure='foo', hemi='both')
+                  figure='foo', hemi='both', clim='auto')
 
     # now do sparse version
     vertices = sample_src[0]['vertno']
@@ -81,35 +78,117 @@ def test_plot_sparse_source_estimates():
                                  opacity=0.5, high_resolution=False)
 
 
+@testing.requires_testing_data
 @requires_mayavi
-@sample.requires_sample_data
 def test_plot_evoked_field():
     """Test plotting evoked field
     """
-    trans_fname = op.join(data_dir, 'MEG', 'sample',
-                          'sample_audvis_raw-trans.fif')
     evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                           baseline=(-0.2, 0.0))
     evoked = pick_channels_evoked(evoked, evoked.ch_names[::10])  # speed
     for t in ['meg', None]:
-        maps = make_field_map(evoked, trans_fname=trans_fname,
-                              subject='sample', subjects_dir=subjects_dir,
-                              n_jobs=1, ch_type=t)
+        maps = make_field_map(evoked, trans_fname, subject='sample',
+                              subjects_dir=subjects_dir, n_jobs=1, ch_type=t)
 
         evoked.plot_field(maps, time=0.1)
 
 
+@testing.requires_testing_data
 @requires_mayavi
-@sample.requires_sample_data
 def test_plot_trans():
     """Test plotting of -trans.fif files
     """
-    trans_fname = op.join(data_dir, 'MEG', 'sample',
-                          'sample_audvis_raw-trans.fif')
     evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                           baseline=(-0.2, 0.0))
-    plot_trans(evoked.info, trans_fname=trans_fname, subject='sample',
+    plot_trans(evoked.info, trans_fname, subject='sample',
                subjects_dir=subjects_dir)
-    assert_raises(ValueError, plot_trans, evoked.info, trans_fname=trans_fname,
+    assert_raises(ValueError, plot_trans, evoked.info, trans_fname,
                   subject='sample', subjects_dir=subjects_dir,
                   ch_type='bad-chtype')
+
+
+@testing.requires_testing_data
+@requires_pysurfer
+@requires_mayavi
+def test_limits_to_control_points():
+    """Test functionality for determing control points
+    """
+    sample_src = read_source_spaces(src_fname)
+
+    vertices = [s['vertno'] for s in sample_src]
+    n_time = 5
+    n_verts = sum(len(v) for v in vertices)
+    stc_data = np.random.rand((n_verts * n_time))
+    stc_data.shape = (n_verts, n_time)
+    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
+
+    # Test for simple use cases
+    from mayavi import mlab
+    stc.plot(subjects_dir=subjects_dir)
+    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
+    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
+             subjects_dir=subjects_dir)
+    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
+    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
+    figs = [mlab.figure(), mlab.figure()]
+    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
+                  subjects_dir=subjects_dir)
+
+    # Test both types of incorrect limits key (lims/pos_lims)
+    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
+                  clim=dict(kind='value', lims=(5, 10, 15)),
+                  subjects_dir=subjects_dir)
+    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
+                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
+                  subjects_dir=subjects_dir)
+
+    # Test for correct clim values
+    assert_raises(ValueError, stc.plot,
+                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, colormap='mne',
+                  clim=dict(pos_lims=(5, 10, 15, 20)),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot,
+                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
+                  subjects_dir=subjects_dir)
+    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
+                  subjects_dir=subjects_dir)
+
+    # Test handling of degenerate data
+    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
+             subjects_dir=subjects_dir)  # ok
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        # thresholded maps
+        stc._data.fill(1.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 0)
+        stc._data[0].fill(0.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 0)
+        stc._data.fill(0.)
+        plot_source_estimates(stc, subjects_dir=subjects_dir)
+        assert_equal(len(w), 1)
+    mlab.close()
+
+
+@testing.requires_testing_data
+@requires_mayavi
+def test_plot_dipole_locations():
+    """Test plotting dipole locations
+    """
+    dipoles = read_dipole(dip_fname)
+    trans = read_trans(trans_fname)
+    dipoles.plot_locations(trans, 'sample', subjects_dir, fig_name='foo')
+    assert_raises(ValueError, dipoles.plot_locations, trans, 'sample',
+                  subjects_dir, mode='foo')
+
+
+run_tests_if_main()
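
The clim handling exercised above takes either 'auto' or a dict. A hedged
illustration, reusing stc and subjects_dir from the test setup; the limits
are arbitrary::

    # percentile-based control points (the default kind)
    stc.plot(subjects_dir=subjects_dir, clim=dict(lims=(10, 50, 90)))
    # absolute data values instead of percentiles
    stc.plot(subjects_dir=subjects_dir,
             clim=dict(kind='value', lims=(2e-10, 5e-10, 9e-10)))
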
diff --git a/mne/viz/tests/test_circle.py b/mne/viz/tests/test_circle.py
index 3e6ded6..1999221 100644
--- a/mne/viz/tests/test_circle.py
+++ b/mne/viz/tests/test_circle.py
@@ -13,12 +13,12 @@ from mne.viz import plot_connectivity_circle, circular_layout
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
 
 def test_plot_connectivity_circle():
     """Test plotting connectivity circle
     """
+    import matplotlib.pyplot as plt
     node_order = ['frontalpole-lh', 'parsorbitalis-lh',
                   'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
                   'medialorbitofrontal-lh', 'parstriangularis-lh',
@@ -87,8 +87,8 @@ def test_plot_connectivity_circle():
                              node_angles=node_angles, title='test',
                              )
 
-    plt.close('all')
     assert_raises(ValueError, circular_layout, label_names, node_order,
                   group_boundaries=[-1])
     assert_raises(ValueError, circular_layout, label_names, node_order,
                   group_boundaries=[20, 0])
+    plt.close('all')
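
A self-contained sketch of the circular_layout/plot_connectivity_circle
pair these tests cover, on toy data::

    import numpy as np
    from mne.viz import circular_layout, plot_connectivity_circle

    names = ['A', 'B', 'C', 'D']
    node_angles = circular_layout(names, names, group_boundaries=[0, 2])
    con = np.random.rand(4, 4)  # toy connectivity matrix
    plot_connectivity_circle(con, names, node_angles=node_angles,
                             title='toy example')
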
diff --git a/mne/viz/tests/test_decoding.py b/mne/viz/tests/test_decoding.py
new file mode 100644
index 0000000..b81ae21
--- /dev/null
+++ b/mne/viz/tests/test_decoding.py
@@ -0,0 +1,124 @@
+# Authors: Denis Engemann <denis.engemann@gmail.com>
+#          Jean-Remi King <jeanremi.king@gmail.com>
+#
+# License: Simplified BSD
+
+import os.path as op
+import warnings
+
+from nose.tools import assert_raises, assert_equals
+
+import numpy as np
+
+from mne.epochs import equalize_epoch_counts, concatenate_epochs
+from mne.decoding import GeneralizationAcrossTime
+from mne import io, Epochs, read_events, pick_types
+from mne.utils import requires_sklearn, run_tests_if_main
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
+
+
+data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
+raw_fname = op.join(data_dir, 'test_raw.fif')
+event_name = op.join(data_dir, 'test-eve.fif')
+
+
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
+
+
+def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
+              event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
+    """Aux function for testing GAT viz"""
+    gat = GeneralizationAcrossTime()
+    raw = io.Raw(raw_fname, preload=False)
+    events = read_events(event_name)
+    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
+                       eog=False, exclude='bads')
+    picks = picks[1:13:3]
+    decim = 30
+    # Test on time generalization within one condition
+    with warnings.catch_warnings(record=True):
+        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                        baseline=(None, 0), preload=True, decim=decim)
+    epochs_list = [epochs[k] for k in event_id]
+    equalize_epoch_counts(epochs_list)
+    epochs = concatenate_epochs(epochs_list)
+
+    # Test default running
+    gat = GeneralizationAcrossTime(test_times=test_times)
+    gat.fit(epochs)
+    gat.score(epochs)
+    return gat
+
+
+@requires_sklearn
+def test_gat_plot_matrix():
+    """Test GAT matrix plot"""
+    gat = _get_data()
+    gat.plot()
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_diagonal():
+    """Test GAT diagonal plot"""
+    gat = _get_data()
+    gat.plot_diagonal()
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_times():
+    """Test GAT times plot"""
+    gat = _get_data()
+    # test one line
+    gat.plot_times(gat.train_times_['times'][0])
+    # test multiple lines
+    gat.plot_times(gat.train_times_['times'])
+    # test multiple colors
+    n_times = len(gat.train_times_['times'])
+    colors = np.tile(['r', 'g', 'b'],
+                     int(np.ceil(n_times / 3)))[:n_times]
+    gat.plot_times(gat.train_times_['times'], color=colors)
+    # test invalid time point
+    assert_raises(ValueError, gat.plot_times, -1.)
+    # test float type
+    assert_raises(ValueError, gat.plot_times, 1)
+    assert_raises(ValueError, gat.plot_times, 'diagonal')
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+def chance(ax):
+    return ax.get_children()[1].get_lines()[0].get_ydata()[0]
+
+
+@requires_sklearn
+def test_gat_chance_level():
+    """Test GAT plot_times chance level"""
+    gat = _get_data()
+    ax = gat.plot_diagonal(chance=False)
+    ax = gat.plot_diagonal()
+    assert_equals(chance(ax), .5)
+    gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
+    ax = gat.plot_diagonal()
+    assert_equals(chance(ax), .25)
+    ax = gat.plot_diagonal(chance=1.234)
+    assert_equals(chance(ax), 1.234)
+    assert_raises(ValueError, gat.plot_diagonal, chance='foo')
+    del gat.scores_
+    assert_raises(RuntimeError, gat.plot)
+
+
+@requires_sklearn
+def test_gat_plot_nonsquared():
+    """Test GAT diagonal plot"""
+    gat = _get_data(test_times=dict(start=0.))
+    gat.plot()
+    ax = gat.plot_diagonal()
+    scores = ax.get_children()[1].get_lines()[2].get_ydata()
+    assert_equals(len(scores), len(gat.estimators_))
+
+run_tests_if_main()
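
The new module ends with run_tests_if_main(), imported from mne.utils. Its implementation is not shown in this diff; conceptually it runs every test_* function of the calling module when that module is executed as a script. A hypothetical sketch of the idea (the real helper may differ in details):

    import inspect

    def run_tests_if_main():
        # look at the caller's globals; do nothing unless run as a script
        caller_globals = inspect.stack()[1][0].f_globals
        if caller_globals.get('__name__') != '__main__':
            return
        for name in sorted(caller_globals):
            obj = caller_globals[name]
            if name.startswith('test_') and callable(obj):
                obj()
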
diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py
index 0f13843..6f3a3b4 100644
--- a/mne/viz/tests/test_epochs.py
+++ b/mne/viz/tests/test_epochs.py
@@ -2,40 +2,38 @@
 #          Denis Engemann <denis.engemann at gmail.com>
 #          Martin Luessi <mluessi at nmr.mgh.harvard.edu>
 #          Eric Larson <larson.eric.d at gmail.com>
+#          Jaakko Leppakangas <jaeilepp at student.jyu.fi>
 #
 # License: Simplified BSD
 
 import os.path as op
 import warnings
+from nose.tools import assert_raises
 
 import numpy as np
 
-# Set our plotters to test mode
-import matplotlib
-matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
 from mne import io, read_events, Epochs
 from mne import pick_types
-from mne.layouts import read_layout
-from mne.datasets import sample
+from mne.utils import run_tests_if_main, requires_version
+from mne.channels import read_layout
 
-from mne.viz import plot_drop_log, plot_image_epochs
+from mne.viz import plot_drop_log, plot_epochs_image, plot_image_epochs
+from mne.viz.utils import _fake_click
 
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
-
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 evoked_fname = op.join(base_dir, 'test-ave.fif')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 cov_fname = op.join(base_dir, 'test-cov.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
-event_id, tmin, tmax = 1, -0.1, 0.1
+event_id, tmin, tmax = 1, -0.1, 1.0
 n_chan = 15
 layout = read_layout('Vectorview-all')
 
@@ -76,34 +74,75 @@ def _get_epochs_delayed_ssp():
 
 
 def test_plot_epochs():
-    """ Test plotting epochs
-    """
+    """Test epoch plotting"""
+    import matplotlib.pyplot as plt
     epochs = _get_epochs()
-    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
-    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
-    # test clicking: should increase coverage on
-    # 3200-3226, 3235, 3237, 3239-3242, 3245-3255, 3260-3280
-    fig = plt.gcf()
-    fig.canvas.button_press_event(10, 10, 'left')
-    # now let's add a bad channel
-    epochs.info['bads'] = [epochs.ch_names[0]]  # include a bad one
-    epochs.plot([0, 1], picks=[0, 2, 3], scalings=None, title_str='%s')
-    epochs[0].plot(picks=[0, 2, 3], scalings=None, title_str='%s')
+    epochs.plot(scalings=None, title='Epochs')
     plt.close('all')
-
-
-def test_plot_image_epochs():
+    fig = epochs[0].plot(picks=[0, 2, 3], scalings=None)
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+    fig = epochs.plot()
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('right')
+    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
+    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('b')
+    fig.canvas.key_press_event('f11')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('?')
+    fig.canvas.key_press_event('h')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('end')
+    fig.canvas.resize_event()
+    fig.canvas.close_event()  # closing and epoch dropping
+    plt.close('all')
+    assert_raises(RuntimeError, epochs.plot, picks=[])
+    plt.close('all')
+    with warnings.catch_warnings(record=True):
+        fig = epochs.plot()
+        # test mouse clicks
+        x = fig.get_axes()[0].get_xlim()[1] / 2
+        y = fig.get_axes()[0].get_ylim()[0] / 2
+        data_ax = fig.get_axes()[0]
+        n_epochs = len(epochs)
+        _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad epoch
+        _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad epoch
+        _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in 1st axes
+        _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label
+        _fake_click(fig, data_ax, [-0.1, 0.9], button=3)
+        _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change epochs
+        _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # change channels
+        fig.canvas.close_event()  # closing and epoch dropping
+        assert(n_epochs - 1 == len(epochs))
+        plt.close('all')
+
+
+def test_plot_epochs_image():
     """Test plotting of epochs image
     """
+    import matplotlib.pyplot as plt
     epochs = _get_epochs()
-    plot_image_epochs(epochs, picks=[1, 2])
+    plot_epochs_image(epochs, picks=[1, 2])
     plt.close('all')
+    with warnings.catch_warnings(record=True):
+        plot_image_epochs(epochs, picks=[1, 2])
+        plt.close('all')
 
 
 def test_plot_drop_log():
     """Test plotting a drop log
     """
+    import matplotlib.pyplot as plt
     epochs = _get_epochs()
+    assert_raises(ValueError, epochs.plot_drop_log)
     epochs.drop_bad_epochs()
 
     warnings.simplefilter('always', UserWarning)
@@ -115,3 +154,18 @@ def test_plot_drop_log():
         plot_drop_log([['One'], ['One', 'Two'], []])
     plt.close('all')
 
+
+@requires_version('scipy', '0.12')
+def test_plot_psd_epochs():
+    """Test plotting epochs psd (+topomap)
+    """
+    import matplotlib.pyplot as plt
+    epochs = _get_epochs()
+    epochs.plot_psd()
+    assert_raises(RuntimeError, epochs.plot_psd_topomap,
+                  bands=[(0, 0.01, 'foo')])  # no freqs in range
+    epochs.plot_psd_topomap()
+    plt.close('all')
+
+
+run_tests_if_main()
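
test_plot_epochs_image above calls both the new plot_epochs_image and the old plot_image_epochs inside warnings.catch_warnings, i.e. the old name survives as a deprecated alias. A generic sketch of such an alias (deprecated_alias is a hypothetical helper; MNE's own mechanism is the deprecated decorator in mne.utils):

    import warnings
    from functools import wraps

    def deprecated_alias(new_func, old_name):
        """Wrap new_func so calls through old_name emit a warning."""
        @wraps(new_func)
        def wrapper(*args, **kwargs):
            warnings.warn('%s is deprecated, use %s instead'
                          % (old_name, new_func.__name__),
                          DeprecationWarning)
            return new_func(*args, **kwargs)
        return wrapper
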
diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py
index d4bb40f..e2c308e 100644
--- a/mne/viz/tests/test_evoked.py
+++ b/mne/viz/tests/test_evoked.py
@@ -13,24 +13,20 @@ import warnings
 import numpy as np
 from numpy.testing import assert_raises
 
+
+from mne import io, read_events, Epochs, pick_types, read_cov
+from mne.viz.evoked import _butterfly_onselect
+from mne.viz.utils import _fake_click
+from mne.utils import slow_test, run_tests_if_main
+from mne.channels import read_layout
+
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
-
-from mne import io, read_events, Epochs
-from mne import pick_types
-from mne.layouts import read_layout
-from mne.datasets import sample
-
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
-
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 evoked_fname = op.join(base_dir, 'test-ave.fif')
 raw_fname = op.join(base_dir, 'test_raw.fif')
@@ -59,9 +55,11 @@ def _get_epochs():
     events = _get_events()
     picks = _get_picks(raw)
     # Use a subset of channels for plotting speed
-    picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
+    picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
+    picks[0] = 2  # make sure we have a magnetometer
     epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0))
+    epochs.info['bads'] = [epochs.ch_names[-1]]
     return epochs
 
 
@@ -76,12 +74,21 @@ def _get_epochs_delayed_ssp():
     return epochs_delayed_ssp
 
 
+@slow_test
 def test_plot_evoked():
     """Test plotting of evoked
     """
+    import matplotlib.pyplot as plt
     evoked = _get_epochs().average()
     with warnings.catch_warnings(record=True):
-        evoked.plot(proj=True, hline=[1])
+        fig = evoked.plot(proj=True, hline=[1], exclude=[])
+        # Test a click
+        ax = fig.get_axes()[0]
+        line = ax.lines[0]
+        _fake_click(fig, ax,
+                    [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+        _fake_click(fig, ax,
+                    [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
         # plot with bad channels excluded
         evoked.plot(exclude='bads')
         evoked.plot(exclude=evoked.info['bads'])  # does the same thing
@@ -98,9 +105,33 @@ def test_plot_evoked():
                       proj='interactive')
         assert_raises(RuntimeError, evoked_delayed_ssp.plot,
                       proj='interactive', axes='foo')
+        plt.close('all')
+
+        # test GFP plot overlay
+        evoked.plot(gfp=True)
+        evoked.plot(gfp='only')
+        assert_raises(ValueError, evoked.plot, gfp='foo')
 
         evoked.plot_image(proj=True)
         # plot with bad channels excluded
         evoked.plot_image(exclude='bads')
         evoked.plot_image(exclude=evoked.info['bads'])  # does the same thing
         plt.close('all')
+
+        evoked.plot_topo()  # should auto-find layout
+        _butterfly_onselect(0, 200, ['mag'], evoked)  # test averaged topomap
+        plt.close('all')
+
+        cov = read_cov(cov_fname)
+        cov['method'] = 'empirical'
+        evoked.plot_white(cov)
+        evoked.plot_white([cov, cov])
+
+        # Hack to test plotting of maxfiltered data
+        evoked_sss = evoked.copy()
+        evoked_sss.info['proc_history'] = [dict(max_info=None)]
+        evoked_sss.plot_white(cov)
+        evoked_sss.plot_white(cov_fname)
+        plt.close('all')
+
+run_tests_if_main()
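
The evoked test now also exercises a GFP overlay (gfp=True / gfp='only'). Global field power is commonly defined as the spatial RMS of the signal at each time point (equivalently, for average-referenced data, the standard deviation across channels); up to the exact scaling MNE applies, a minimal NumPy equivalent is:

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.randn(60, 100)                  # n_channels x n_times
    gfp = np.sqrt((data ** 2).mean(axis=0))    # spatial RMS per sample
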
diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py
index 8532902..ae0ce93 100644
--- a/mne/viz/tests/test_ica.py
+++ b/mne/viz/tests/test_ica.py
@@ -4,29 +4,21 @@
 # License: Simplified BSD
 
 import os.path as op
-from functools import wraps
 import warnings
 
 from numpy.testing import assert_raises
 
 from mne import io, read_events, Epochs, read_cov
 from mne import pick_types
-from mne.datasets import sample
-from mne.utils import check_sklearn_version
+from mne.utils import run_tests_if_main, requires_sklearn
+from mne.viz.utils import _fake_click
 from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
 
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
-
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
-
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 evoked_fname = op.join(base_dir, 'test-ave.fif')
@@ -36,21 +28,8 @@ event_name = op.join(base_dir, 'test-eve.fif')
 event_id, tmin, tmax = 1, -0.1, 0.2
 
 
-def requires_sklearn(function):
-    """Decorator to skip test if scikit-learn >= 0.12 is not available"""
-    @wraps(function)
-    def dec(*args, **kwargs):
-        if not check_sklearn_version(min_version='0.12'):
-            from nose.plugins.skip import SkipTest
-            raise SkipTest('Test %s skipped, requires scikit-learn >= 0.12'
-                           % function.__name__)
-        ret = function(*args, **kwargs)
-        return ret
-    return dec
-
-
-def _get_raw():
-    return io.Raw(raw_fname, preload=False)
+def _get_raw(preload=False):
+    return io.Raw(raw_fname, preload=preload)
 
 
 def _get_events():
@@ -74,6 +53,7 @@ def _get_epochs():
 def test_plot_ica_components():
     """Test plotting of ICA solutions
     """
+    import matplotlib.pyplot as plt
     raw = _get_raw()
     ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
               max_pca_components=3, n_pca_components=3)
@@ -84,7 +64,8 @@ def test_plot_ica_components():
         for components in [0, [0], [0, 1], [0, 1] * 2, None]:
             ica.plot_components(components, image_interp='bilinear', res=16)
     ica.info = None
-    assert_raises(RuntimeError, ica.plot_components, 1)
+    assert_raises(ValueError, ica.plot_components, 1)
+    assert_raises(RuntimeError, ica.plot_components, 1, ch_type='mag')
     plt.close('all')
 
 
@@ -92,7 +73,10 @@ def test_plot_ica_components():
 def test_plot_ica_sources():
     """Test plotting of ICA panel
     """
-    raw = io.Raw(raw_fname, preload=True)
+    import matplotlib.pyplot as plt
+    raw = io.Raw(raw_fname, preload=False)
+    raw.crop(0, 1, copy=False)
+    raw.load_data()
     picks = _get_picks(raw)
     epochs = _get_epochs()
     raw.pick_channels([raw.ch_names[k] for k in picks])
@@ -100,10 +84,27 @@ def test_plot_ica_sources():
                            ecg=False, eog=False, exclude='bads')
     ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
     ica.fit(raw, picks=ica_picks)
-    ica.plot_sources(raw)
+    raw.info['bads'] = ['MEG 0113']
+    assert_raises(RuntimeError, ica.plot_sources, inst=raw)
     ica.plot_sources(epochs)
+    epochs.info['bads'] = ['MEG 0113']
+    assert_raises(RuntimeError, ica.plot_sources, inst=epochs)
+    epochs.info['bads'] = []
     with warnings.catch_warnings(record=True):  # no labeled objects mpl
         ica.plot_sources(epochs.average())
+        evoked = epochs.average()
+        fig = ica.plot_sources(evoked)
+        # Test a click
+        ax = fig.get_axes()[0]
+        line = ax.lines[0]
+        _fake_click(fig, ax,
+                    [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+        _fake_click(fig, ax,
+                    [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
+        # plot with bad channels excluded
+        ica.plot_sources(evoked, exclude=[0])
+        ica.exclude = [0]
+        ica.plot_sources(evoked)  # does the same thing
     assert_raises(ValueError, ica.plot_sources, 'meeow')
     plt.close('all')
 
@@ -112,7 +113,8 @@ def test_plot_ica_sources():
 def test_plot_ica_overlay():
     """Test plotting of ICA cleaning
     """
-    raw = _get_raw()
+    import matplotlib.pyplot as plt
+    raw = _get_raw(preload=True)
     picks = _get_picks(raw)
     ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
               max_pca_components=3, n_pca_components=3)
@@ -123,6 +125,7 @@ def test_plot_ica_overlay():
     eog_epochs = create_eog_epochs(raw, picks=picks)
     ica.plot_overlay(eog_epochs.average())
     assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
+    ica.plot_overlay(raw)
     plt.close('all')
 
 
@@ -130,6 +133,7 @@ def test_plot_ica_overlay():
 def test_plot_ica_scores():
     """Test plotting of ICA scores
     """
+    import matplotlib.pyplot as plt
     raw = _get_raw()
     picks = _get_picks(raw)
     ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
@@ -138,3 +142,59 @@ def test_plot_ica_scores():
     ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
     assert_raises(ValueError, ica.plot_scores, [0.2])
     plt.close('all')
+
+
+@requires_sklearn
+def test_plot_instance_components():
+    """Test plotting of components as instances of raw and epochs."""
+    import matplotlib.pyplot as plt
+    raw = _get_raw()
+    picks = _get_picks(raw)
+    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
+              max_pca_components=3, n_pca_components=3)
+    ica.fit(raw, picks=picks)
+    fig = ica.plot_sources(raw, exclude=[0], title='Components')
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('right')
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('end')
+    fig.canvas.key_press_event('f11')
+    ax = fig.get_axes()[0]
+    line = ax.lines[0]
+    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+    epochs = _get_epochs()
+    fig = ica.plot_sources(epochs, exclude=[0], title='Components')
+    fig.canvas.key_press_event('down')
+    fig.canvas.key_press_event('up')
+    fig.canvas.key_press_event('right')
+    fig.canvas.key_press_event('left')
+    fig.canvas.key_press_event('o')
+    fig.canvas.key_press_event('-')
+    fig.canvas.key_press_event('+')
+    fig.canvas.key_press_event('=')
+    fig.canvas.key_press_event('pageup')
+    fig.canvas.key_press_event('pagedown')
+    fig.canvas.key_press_event('home')
+    fig.canvas.key_press_event('end')
+    fig.canvas.key_press_event('f11')
+    # Test a click
+    ax = fig.get_axes()[0]
+    line = ax.lines[0]
+    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
+    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
+    fig.canvas.key_press_event('escape')
+    plt.close('all')
+
+
+run_tests_if_main()
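
The local requires_sklearn decorator deleted above is replaced by the shared one in mne.utils. A generic version of the idea, sketched here with a hypothetical requires_module factory (the shared helper's exact behavior may differ):

    from functools import wraps

    def requires_module(modname):
        """Skip the decorated test when `modname` cannot be imported."""
        def decorator(function):
            @wraps(function)
            def wrapper(*args, **kwargs):
                try:
                    __import__(modname)
                except ImportError:
                    from nose.plugins.skip import SkipTest
                    raise SkipTest('Test %s skipped, requires %s'
                                   % (function.__name__, modname))
                return function(*args, **kwargs)
            return wrapper
        return decorator
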
diff --git a/mne/viz/tests/test_misc.py b/mne/viz/tests/test_misc.py
index 8b76ccf..fd38840 100644
--- a/mne/viz/tests/test_misc.py
+++ b/mne/viz/tests/test_misc.py
@@ -13,30 +13,31 @@ import warnings
 import numpy as np
 from numpy.testing import assert_raises
 
-from mne import io, read_events, read_cov, read_source_spaces
-from mne import SourceEstimate
-from mne.datasets import sample
-
-from mne.viz import plot_cov, plot_bem, plot_events
-from mne.viz import plot_source_spectrogram
-
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
+from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
+                 read_dipole, SourceEstimate)
+from mne.datasets import testing
+from mne.minimum_norm import read_inverse_operator
+from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
+                     plot_snr_estimate)
+from mne.utils import requires_nibabel, run_tests_if_main, slow_test
 
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
-
+data_path = testing.data_path(download=False)
+subjects_dir = op.join(data_path, 'subjects')
+inv_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
+evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
+dip_fname = op.join(data_path, 'MEG', 'sample',
+                    'sample_audvis_trunc_set1.dip')
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 cov_fname = op.join(base_dir, 'test-cov.fif')
-event_name = op.join(base_dir, 'test-eve.fif')
+event_fname = op.join(base_dir, 'test-eve.fif')
 
 
 def _get_raw():
@@ -44,7 +45,7 @@ def _get_raw():
 
 
 def _get_events():
-    return read_events(event_name)
+    return read_events(event_fname)
 
 
 def test_plot_cov():
@@ -52,11 +53,11 @@ def test_plot_cov():
     """
     raw = _get_raw()
     cov = read_cov(cov_fname)
-    fig1, fig2 = plot_cov(cov, raw.info, proj=True, exclude=raw.ch_names[6:])
-    plt.close('all')
+    fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
+@requires_nibabel()
 def test_plot_bem():
     """Test plotting of BEM contours
     """
@@ -65,7 +66,7 @@ def test_plot_bem():
     assert_raises(ValueError, plot_bem, subject='sample',
                   subjects_dir=subjects_dir, orientation='bad-ori')
     plot_bem(subject='sample', subjects_dir=subjects_dir,
-             orientation='sagittal', slices=[50, 100])
+             orientation='sagittal', slices=[25, 50])
 
 
 def test_plot_events():
@@ -93,11 +94,11 @@ def test_plot_events():
                       raw.first_samp, event_id={'aud_l': 111}, color=color)
 
 
-@sample.requires_sample_data
+@testing.requires_testing_data
 def test_plot_source_spectrogram():
     """Test plotting of source spectrogram
     """
-    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
+    sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
                                             'bem', 'sample-oct-6-src.fif'))
 
     # dense version
@@ -112,3 +113,23 @@ def test_plot_source_spectrogram():
                   [[1, 2], [3, 4]], tmin=0)
     assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
                   [[1, 2], [3, 4]], tmax=7)
+
+
+@slow_test
+@testing.requires_testing_data
+def test_plot_snr():
+    """Test plotting SNR estimate
+    """
+    inv = read_inverse_operator(inv_fname)
+    evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
+    plot_snr_estimate(evoked, inv)
+
+
+@testing.requires_testing_data
+def test_plot_dipole_amplitudes():
+    """Test plotting dipole amplitudes
+    """
+    dipoles = read_dipole(dip_fname)
+    dipoles.plot_amplitudes(show=False)
+
+run_tests_if_main()
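
These tests now draw on mne.datasets.testing rather than the full sample dataset: data_path(download=False) returns the local path without fetching anything, and the requires_testing_data decorator skips a test when the data are absent. The usage pattern, with a path taken from the module above:

    import os.path as op
    from mne.datasets import testing

    data_path = testing.data_path(download=False)  # no network access

    @testing.requires_testing_data
    def test_needs_testing_data():
        fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
        assert op.isfile(fname)
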
diff --git a/mne/viz/tests/test_montage.py b/mne/viz/tests/test_montage.py
new file mode 100644
index 0000000..6ea5b44
--- /dev/null
+++ b/mne/viz/tests/test_montage.py
@@ -0,0 +1,30 @@
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+#          Teon Brooks <teon.brooks at gmail.com>
+#
+# License: Simplified BSD
+
+# Set our plotters to test mode
+import matplotlib
+import os.path as op
+matplotlib.use('Agg')  # for testing don't use X server
+
+from mne.channels import read_montage, read_dig_montage  # noqa
+
+
+p_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', 'tests', 'data')
+elp = op.join(p_dir, 'test_elp.txt')
+hsp = op.join(p_dir, 'test_hsp.txt')
+hpi = op.join(p_dir, 'test_mrk.sqd')
+point_names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
+
+
+def test_plot_montage():
+    """Test plotting montages
+    """
+    m = read_montage('easycap-M1')
+    m.plot()
+    m.plot(show_names=True)
+    d = read_dig_montage(hsp, hpi, elp, point_names)
+    d.plot()
+    d.plot(show_names=True)
diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py
index e76a7db..311215c 100644
--- a/mne/viz/tests/test_raw.py
+++ b/mne/viz/tests/test_raw.py
@@ -8,38 +8,20 @@ import warnings
 from numpy.testing import assert_raises
 
 from mne import io, read_events, pick_types
-from mne.datasets import sample
-
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
+from mne.utils import requires_version, run_tests_if_main
+from mne.viz.utils import _fake_click
 
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 event_name = op.join(base_dir, 'test-eve.fif')
 
 
-def _fake_click(fig, ax, point, xform='ax'):
-    """Helper to fake a click at a relative point within axes"""
-    if xform == 'ax':
-        x, y = ax.transAxes.transform_point(point)
-    elif xform == 'data':
-        x, y = ax.transData.transform_point(point)
-    else:
-        raise ValueError('unknown transform')
-    try:
-        fig.canvas.button_press_event(x, y, 1, False, None)
-    except:  # for old MPL
-        fig.canvas.button_press_event(x, y, 1, False)
-
-
 def _get_raw():
     raw = io.Raw(raw_fname, preload=True)
     raw.pick_channels(raw.ch_names[:9])
@@ -53,6 +35,7 @@ def _get_events():
 def test_plot_raw():
     """Test plotting of raw data
     """
+    import matplotlib.pyplot as plt
     raw = _get_raw()
     events = _get_events()
     plt.close('all')  # ensure all are closed
@@ -65,43 +48,78 @@ def test_plot_raw():
         _fake_click(fig, data_ax, [x, y], xform='data')  # mark a bad channel
         _fake_click(fig, data_ax, [x, y], xform='data')  # unmark a bad channel
         _fake_click(fig, data_ax, [0.5, 0.999])  # click elsewhere in 1st axes
+        _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label
         _fake_click(fig, fig.get_axes()[1], [0.5, 0.5])  # change time
         _fake_click(fig, fig.get_axes()[2], [0.5, 0.5])  # change channels
         _fake_click(fig, fig.get_axes()[3], [0.5, 0.5])  # open SSP window
         fig.canvas.button_press_event(1, 1, 1)  # outside any axes
+        fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
+        fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
         # sadly these fail when no renderer is used (i.e., when using Agg):
-        #ssp_fig = set(plt.get_fignums()) - set([fig.number])
-        #assert_equal(len(ssp_fig), 1)
-        #ssp_fig = plt.figure(list(ssp_fig)[0])
-        #ax = ssp_fig.get_axes()[0]  # only one axis is used
-        #t = [c for c in ax.get_children() if isinstance(c,
-        #     matplotlib.text.Text)]
-        #pos = np.array(t[0].get_position()) + 0.01
-        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # off
-        #_fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # on
-        # test keypresses
+        # ssp_fig = set(plt.get_fignums()) - set([fig.number])
+        # assert_equal(len(ssp_fig), 1)
+        # ssp_fig = plt.figure(list(ssp_fig)[0])
+        # ax = ssp_fig.get_axes()[0]  # only one axis is used
+        # t = [c for c in ax.get_children() if isinstance(c,
+        #      matplotlib.text.Text)]
+        # pos = np.array(t[0].get_position()) + 0.01
+        # _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # off
+        # _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data')  # on
+        #  test keypresses
         fig.canvas.key_press_event('escape')
         fig.canvas.key_press_event('down')
         fig.canvas.key_press_event('up')
         fig.canvas.key_press_event('right')
         fig.canvas.key_press_event('left')
         fig.canvas.key_press_event('o')
+        fig.canvas.key_press_event('-')
+        fig.canvas.key_press_event('+')
+        fig.canvas.key_press_event('=')
+        fig.canvas.key_press_event('pageup')
+        fig.canvas.key_press_event('pagedown')
+        fig.canvas.key_press_event('home')
+        fig.canvas.key_press_event('end')
+        fig.canvas.key_press_event('?')
+        fig.canvas.key_press_event('f11')
         fig.canvas.key_press_event('escape')
+        # Color setting
+        assert_raises(KeyError, raw.plot, event_color={0: 'r'})
+        assert_raises(TypeError, raw.plot, event_color={'foo': 'r'})
+        fig = raw.plot(events=events, event_color={-1: 'r', 998: 'b'})
         plt.close('all')
 
 
-def test_plot_raw_psds():
+@requires_version('scipy', '0.10')
+def test_plot_raw_filtered():
+    """Test filtering of raw plots
+    """
+    raw = _get_raw()
+    assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
+    assert_raises(ValueError, raw.plot, highpass=0)
+    assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
+    assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
+    assert_raises(ValueError, raw.plot, clipping='foo')
+    raw.plot(lowpass=1, clipping='transparent')
+    raw.plot(highpass=1, clipping='clamp')
+    raw.plot(highpass=1, lowpass=2)
+
+
+@requires_version('scipy', '0.12')
+def test_plot_raw_psd():
     """Test plotting of raw psds
     """
     import matplotlib.pyplot as plt
     raw = _get_raw()
     # normal mode
-    raw.plot_psds(tmax=2.0)
+    raw.plot_psd(tmax=2.0)
     # specific mode
     picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
-    raw.plot_psds(picks=picks, area_mode='range')
+    raw.plot_psd(picks=picks, area_mode='range')
     ax = plt.axes()
     # if ax is supplied, picks must be, too:
-    assert_raises(ValueError, raw.plot_psds, ax=ax)
-    raw.plot_psds(picks=picks, ax=ax)
+    assert_raises(ValueError, raw.plot_psd, ax=ax)
+    raw.plot_psd(picks=picks, ax=ax)
     plt.close('all')
+
+
+run_tests_if_main()
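
The _fake_click helper deleted above is the one the other test modules now import from mne.viz.utils. Restated standalone for reference, mirroring the removed local copy:

    def fake_click(fig, ax, point, xform='ax'):
        """Fake a button press at a point in axes or data coordinates."""
        if xform == 'ax':
            x, y = ax.transAxes.transform_point(point)
        elif xform == 'data':
            x, y = ax.transData.transform_point(point)
        else:
            raise ValueError('unknown transform')
        try:
            fig.canvas.button_press_event(x, y, 1, False, None)
        except TypeError:  # older matplotlib takes one argument fewer
            fig.canvas.button_press_event(x, y, 1, False)
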
diff --git a/mne/viz/tests/test_topo.py b/mne/viz/tests/test_topo.py
index a0f2b24..127c4af 100644
--- a/mne/viz/tests/test_topo.py
+++ b/mne/viz/tests/test_topo.py
@@ -7,31 +7,29 @@
 
 import os.path as op
 import warnings
+from collections import namedtuple
 
 import numpy as np
 from numpy.testing import assert_raises
 
-# Set our plotters to test mode
-import matplotlib
-matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
 from mne import io, read_events, Epochs
 from mne import pick_channels_evoked
-from mne.layouts import read_layout
-from mne.datasets import sample
+from mne.channels import read_layout
 from mne.time_frequency.tfr import AverageTFR
+from mne.utils import run_tests_if_main
 
-from mne.viz import plot_topo, plot_topo_image_epochs
+from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
+                     mne_analyze_colormap, plot_evoked_topo)
+from mne.viz.topo import _plot_update_evoked_topo
 
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
-data_dir = sample.data_path(download=False)
-subjects_dir = op.join(data_dir, 'subjects')
-ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
-
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 evoked_fname = op.join(base_dir, 'test-ave.fif')
 raw_fname = op.join(base_dir, 'test_raw.fif')
@@ -49,7 +47,7 @@ def _get_events():
 
 
 def _get_picks(raw):
-    return [0, 1, 2, 6, 7, 8, 12, 13, 14]  # take only a few channels
+    return [0, 1, 2, 6, 7, 8, 340, 341, 342]  # take only a few channels
 
 
 def _get_epochs():
@@ -75,11 +73,14 @@ def _get_epochs_delayed_ssp():
 def test_plot_topo():
     """Test plotting of ERP topography
     """
+    import matplotlib.pyplot as plt
     # Show topography
     evoked = _get_epochs().average()
-    plot_topo(evoked, layout)
+    plot_evoked_topo(evoked)  # should auto-find layout
     warnings.simplefilter('always', UserWarning)
-    picked_evoked = pick_channels_evoked(evoked, evoked.ch_names[:3])
+    picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
+    picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
+    picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
 
     # test scaling
     with warnings.catch_warnings(record=True):
@@ -93,16 +94,30 @@ def test_plot_topo():
         ch_names = evoked_delayed_ssp.ch_names[:3]  # make it faster
         picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
                                                          ch_names)
-        plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
+        fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
+        func = _get_presser(fig)
+        event = namedtuple('Event', 'inaxes')
+        func(event(inaxes=fig.axes[0]))
+        params = dict(evokeds=[picked_evoked_delayed_ssp],
+                      times=picked_evoked_delayed_ssp.times,
+                      fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
+        bools = [True] * len(params['projs'])
+        _plot_update_evoked_topo(params, bools)
+    # should auto-generate layout
+    plot_evoked_topo(picked_evoked_eeg.copy(),
+                     fig_background=np.zeros((4, 3, 3)), proj=True)
+    plt.close('all')
 
 
 def test_plot_topo_image_epochs():
     """Test plotting of epochs image topography
     """
+    import matplotlib.pyplot as plt
     title = 'ERF images - MNE sample data'
     epochs = _get_epochs()
-    plot_topo_image_epochs(epochs, layout, sigma=0.5, vmin=-200, vmax=200,
-                           colorbar=True, title=title)
+    cmap = mne_analyze_colormap(format='matplotlib')
+    plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
+                           colorbar=True, title=title, cmap=cmap)
     plt.close('all')
 
 
@@ -112,8 +127,11 @@ def test_plot_tfr_topo():
     epochs = _get_epochs()
     n_freqs = 3
     nave = 1
-    data = np.random.randn(len(epochs.ch_names), n_freqs, len(epochs.times))
+    data = np.random.RandomState(0).randn(len(epochs.ch_names),
+                                          n_freqs, len(epochs.times))
     tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
     tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
-                  vmin=0., vmax=14.)
-    tfr.plot([4], baseline=(None, 0), mode='ratio')
+                  vmin=0., vmax=14., show=False)
+    tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
+
+run_tests_if_main()
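
test_plot_topo drives the interactive presser callback with a namedtuple rather than a real matplotlib event: any object exposing an inaxes attribute will do. The duck-typing trick in isolation:

    from collections import namedtuple

    Event = namedtuple('Event', 'inaxes')

    def on_press(event):
        # a real handler only reads event.inaxes, so the stand-in suffices
        return event.inaxes

    assert on_press(Event(inaxes=None)) is None
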
diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py
index e58042a..3504bf4 100644
--- a/mne/viz/tests/test_topomap.py
+++ b/mne/viz/tests/test_topomap.py
@@ -9,29 +9,30 @@ import os.path as op
 import warnings
 
 import numpy as np
-from numpy.testing import assert_raises
+from numpy.testing import assert_raises, assert_array_equal
 
 from nose.tools import assert_true, assert_equal
 
-# Set our plotters to test mode
-import matplotlib
-matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
-from mne import io
-from mne import read_evokeds, read_proj
+from mne import io, read_evokeds, read_proj
 from mne.io.constants import FIFF
-from mne.layouts import read_layout
-from mne.datasets import sample
+from mne.channels import read_layout, make_eeg_layout
+from mne.datasets import testing
 from mne.time_frequency.tfr import AverageTFR
+from mne.utils import slow_test
 
 from mne.viz import plot_evoked_topomap, plot_projs_topomap
+from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap,
+                             _find_peaks)
 
+# Set our plotters to test mode
+import matplotlib
+matplotlib.use('Agg')  # for testing don't use X server
 
 warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 
-data_dir = sample.data_path(download=False)
+data_dir = testing.data_path(download=False)
 subjects_dir = op.join(data_dir, 'subjects')
 ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
 
@@ -47,79 +48,191 @@ def _get_raw():
     return io.Raw(raw_fname, preload=False)
 
 
-@sample.requires_sample_data
+@slow_test
+@testing.requires_testing_data
 def test_plot_topomap():
     """Test topomap plotting
     """
+    import matplotlib.pyplot as plt
+    from matplotlib.patches import Circle
     # evoked
-    warnings.simplefilter('always', UserWarning)
+    warnings.simplefilter('always')
     res = 16
+    evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                          baseline=(None, 0))
+    ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
+    ev_bad.pick_channels(ev_bad.ch_names[:2])
+    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, should plot EEG
+    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
+    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
+    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
+    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
+    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
+    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time
+
+    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
+    plt.close('all')
+    axes = [plt.subplot(221), plt.subplot(222)]
+    evoked.plot_topomap(axes=axes, colorbar=False)
+    plt.close('all')
+    evoked.plot_topomap(times=[-0.1, 0.2])
+    plt.close('all')
+    mask = np.zeros_like(evoked.data, dtype=bool)
+    mask[[1, 5], :] = True
+    evoked.plot_topomap(ch_type='mag', outlines=None)
+    times = [0.1]
+    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
+    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
+    evoked.plot_topomap(times, ch_type='planar1', res=res)
+    evoked.plot_topomap(times, ch_type='planar2', res=res)
+    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
+                        show_names=True, mask_params={'marker': 'x'})
+    plt.close('all')
+    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
+                  res=res, average=-1000)
+    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
+                  res=res, average='hahahahah')
+
+    p = evoked.plot_topomap(times, ch_type='grad', res=res,
+                            show_names=lambda x: x.replace('MEG', ''),
+                            image_interp='bilinear')
+    subplot = [x for x in p.get_children() if
+               isinstance(x, matplotlib.axes.Subplot)][0]
+    assert_true(all('MEG' not in x.get_text()
+                    for x in subplot.get_children()
+                    if isinstance(x, matplotlib.text.Text)))
+
+    # Test title
+    def get_texts(p):
+        return [x.get_text() for x in p.get_children() if
+                isinstance(x, matplotlib.text.Text)]
+
+    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
+    assert_equal(len(get_texts(p)), 0)
+    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
+    texts = get_texts(p)
+    assert_equal(len(texts), 1)
+    assert_equal(texts[0], 'Custom')
+    plt.close('all')
+
+    # delaunay triangulation warning
+    with warnings.catch_warnings(record=True):  # can't show
+        warnings.simplefilter('always')
+        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
+                  proj='interactive')  # projs have already been applied
+
+    # change to no-proj mode
+    evoked = read_evokeds(evoked_fname, 'Left Auditory',
+                          baseline=(None, 0), proj=False)
     with warnings.catch_warnings(record=True):
-        evoked = read_evokeds(evoked_fname, 'Left Auditory',
-                              baseline=(None, 0))
-        evoked.plot_topomap(0.1, 'mag', layout=layout)
-        mask = np.zeros_like(evoked.data, dtype=bool)
-        mask[[1, 5], :] = True
-        evoked.plot_topomap(None, ch_type='mag', outlines=None)
-        times = [0.1]
-        evoked.plot_topomap(times, ch_type='eeg', res=res)
-        evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
-        evoked.plot_topomap(times, ch_type='planar1', res=res)
-        evoked.plot_topomap(times, ch_type='planar2', res=res)
-        evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
-                            show_names=True, mask_params={'marker': 'x'})
-
-        p = evoked.plot_topomap(times, ch_type='grad', res=res,
-                                show_names=lambda x: x.replace('MEG', ''),
-                                image_interp='bilinear')
-        subplot = [x for x in p.get_children() if
-                   isinstance(x, matplotlib.axes.Subplot)][0]
-        assert_true(all('MEG' not in x.get_text()
-                        for x in subplot.get_children()
-                        if isinstance(x, matplotlib.text.Text)))
-
-        # Test title
-        def get_texts(p):
-            return [x.get_text() for x in p.get_children() if
-                    isinstance(x, matplotlib.text.Text)]
-
-        p = evoked.plot_topomap(times, ch_type='eeg', res=res)
-        assert_equal(len(get_texts(p)), 0)
-        p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
-        texts = get_texts(p)
-        assert_equal(len(texts), 1)
-        assert_equal(texts[0], 'Custom')
-
-        # delaunay triangulation warning
-        with warnings.catch_warnings(record=True):
-            evoked.plot_topomap(times, ch_type='mag', layout='auto', res=res)
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
-                      proj='interactive')  # projs have already been applied
-
-        # change to no-proj mode
-        evoked = read_evokeds(evoked_fname, 'Left Auditory',
-                              baseline=(None, 0), proj=False)
+        warnings.simplefilter('always')
         evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
-                      np.repeat(.1, 50))
-        assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                  np.repeat(.1, 50))
+    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
 
+    with warnings.catch_warnings(record=True):  # file conventions
+        warnings.simplefilter('always')
         projs = read_proj(ecg_fname)
-        projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
-        plot_projs_topomap(projs, res=res)
-        plt.close('all')
-        for ch in evoked.info['chs']:
-            if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
-                if ch['eeg_loc'] is not None:
-                    ch['eeg_loc'].fill(0)
-                ch['loc'].fill(0)
-        assert_raises(RuntimeError, plot_evoked_topomap, evoked,
-                      times, ch_type='eeg')
+    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
+    plot_projs_topomap(projs, res=res)
+    plt.close('all')
+    ax = plt.subplot(111)
+    plot_projs_topomap([projs[0]], res=res, axes=ax)  # test axes param
+    plt.close('all')
+    for ch in evoked.info['chs']:
+        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
+            ch['loc'].fill(0)
+
+    # Remove extra digitization point, so EEG digitization points
+    # correspond with the EEG electrodes
+    del evoked.info['dig'][85]
+
+    pos = make_eeg_layout(evoked.info).pos[:, :2]
+    pos, outlines = _check_outlines(pos, 'head')
+    assert_true('head' in outlines.keys())
+    assert_true('nose' in outlines.keys())
+    assert_true('ear_left' in outlines.keys())
+    assert_true('ear_right' in outlines.keys())
+    assert_true('autoshrink' in outlines.keys())
+    assert_true(outlines['autoshrink'])
+    assert_true('clip_radius' in outlines.keys())
+    assert_array_equal(outlines['clip_radius'], 0.5)
+
+    pos, outlines = _check_outlines(pos, 'skirt')
+    assert_true('head' in outlines.keys())
+    assert_true('nose' in outlines.keys())
+    assert_true('ear_left' in outlines.keys())
+    assert_true('ear_right' in outlines.keys())
+    assert_true('autoshrink' in outlines.keys())
+    assert_true(not outlines['autoshrink'])
+    assert_true('clip_radius' in outlines.keys())
+    assert_array_equal(outlines['clip_radius'], 0.625)
+
+    pos, outlines = _check_outlines(pos, 'skirt',
+                                    head_pos={'scale': [1.2, 1.2]})
+    assert_array_equal(outlines['clip_radius'], 0.75)
+
+    # Plot skirt
+    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')
+
+    # Pass custom outlines without patch
+    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
+    plt.close('all')
+
+    # Pass custom outlines with patch callable
+    def patch():
+        return Circle((0.5, 0.4687), radius=.46,
+                      clip_on=True, transform=plt.gca().transAxes)
+    outlines['patch'] = patch
+    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)
+
+    # Remove digitization points. Now topomap should fail
+    evoked.info['dig'] = None
+    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
+                  times, ch_type='eeg')
+    plt.close('all')
+
+    # Test error messages for invalid pos parameter
+    n_channels = len(pos)
+    data = np.ones(n_channels)
+    pos_1d = np.zeros(n_channels)
+    pos_3d = np.zeros((n_channels, 2, 2))
+    assert_raises(ValueError, plot_topomap, data, pos_1d)
+    assert_raises(ValueError, plot_topomap, data, pos_3d)
+    assert_raises(ValueError, plot_topomap, data, pos[:3, :])
+
+    pos_x = pos[:, :1]
+    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
+    assert_raises(ValueError, plot_topomap, data, pos_x)
+    assert_raises(ValueError, plot_topomap, data, pos_xyz)
+
+    # An n_channels x 4 matrix should work though. In this case (x, y,
+    # width, height) is assumed.
+    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
+    plot_topomap(data, pos_xywh)
+    plt.close('all')
+
+    # Test peak finder
+    axes = [plt.subplot(131), plt.subplot(132)]
+    evoked.plot_topomap(times='peaks', axes=axes)
+    plt.close('all')
+    evoked.data = np.zeros(evoked.data.shape)
+    evoked.data[50][1] = 1
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
+    evoked.data[80][100] = 1
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
+    evoked.data[2][95] = 2
+    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
+    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
 
 
 def test_plot_tfr_topomap():
     """Test plotting of TFR data
     """
+    import matplotlib as mpl
+    import matplotlib.pyplot as plt
     raw = _get_raw()
     times = np.linspace(-0.1, 0.1, 200)
     n_freqs = 3
@@ -129,3 +242,17 @@ def test_plot_tfr_topomap():
     tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
     tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
                      res=16)
+
+    eclick = mpl.backend_bases.MouseEvent('button_press_event',
+                                          plt.gcf().canvas, 0, 0, 1)
+    eclick.xdata = 0.1
+    eclick.ydata = 0.1
+    eclick.inaxes = plt.gca()
+    erelease = mpl.backend_bases.MouseEvent('button_release_event',
+                                            plt.gcf().canvas, 0.9, 0.9, 1)
+    erelease.xdata = 0.3
+    erelease.ydata = 0.2
+    pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
+    _onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
+    tfr._onselect(eclick, erelease, None, 'mean', None)
+    plt.close('all')
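
The TFR topomap test above builds MouseEvents by hand. Because the MouseEvent constructor takes pixel coordinates, the data coordinates and target axes that the _onselect callbacks actually read are patched onto the event afterwards. The same trick in isolation:

    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    event = matplotlib.backend_bases.MouseEvent(
        'button_press_event', fig.canvas, 0, 0, 1)  # pixel coords + button
    event.xdata, event.ydata, event.inaxes = 0.1, 0.2, ax  # data coords
    plt.close('all')
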
diff --git a/mne/viz/tests/test_utils.py b/mne/viz/tests/test_utils.py
index f87cc33..7a337ac 100644
--- a/mne/viz/tests/test_utils.py
+++ b/mne/viz/tests/test_utils.py
@@ -4,25 +4,84 @@
 
 import os.path as op
 import warnings
+import numpy as np
+from nose.tools import assert_true, assert_raises
+from numpy.testing import assert_allclose
 
-from mne.viz.utils import compare_fiff
-
-
-warnings.simplefilter('always')  # enable b/c these tests throw warnings
+from mne.viz.utils import compare_fiff, _fake_click
+from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
+from mne.utils import run_tests_if_main
 
 # Set our plotters to test mode
 import matplotlib
 matplotlib.use('Agg')  # for testing don't use X server
-import matplotlib.pyplot as plt
 
+warnings.simplefilter('always')  # enable b/c these tests throw warnings
 
 base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
 raw_fname = op.join(base_dir, 'test_raw.fif')
 cov_fname = op.join(base_dir, 'test-cov.fif')
 
 
-def test_compare_fiff():
-    """Test comparing fiff files
+def test_mne_analyze_colormap():
+    """Test mne_analyze_colormap
     """
+    assert_raises(ValueError, mne_analyze_colormap, [0])
+    assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
+    assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
+
+
+def test_compare_fiff():
+    import matplotlib.pyplot as plt
     compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
     plt.close('all')
+
+
+def test_clickable_image():
+    """Test the ClickableImage class."""
+    # Gen data and create clickable image
+    import matplotlib.pyplot as plt
+    im = np.random.randn(100, 100)
+    clk = ClickableImage(im)
+    clicks = [(12, 8), (46, 48), (10, 24)]
+
+    # Generate clicks
+    for click in clicks:
+        _fake_click(clk.fig, clk.ax, click, xform='data')
+    assert_allclose(np.array(clicks), np.array(clk.coords))
+    assert_true(len(clicks) == len(clk.coords))
+
+    # Exporting to layout
+    lt = clk.to_layout()
+    assert_true(lt.pos.shape[0] == len(clicks))
+    assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
+                    clicks[1][0] / float(clicks[2][0]))
+    clk.plot_clicks()
+    plt.close('all')
+
+
+def test_add_background_image():
+    """Test adding background image to a figure."""
+    import matplotlib.pyplot as plt
+    f, axs = plt.subplots(1, 2)
+    x, y = np.random.randn(2, 10)
+    im = np.random.randn(10, 10)
+    axs[0].scatter(x, y)
+    axs[1].scatter(y, x)
+    for ax in axs:
+        ax.set_aspect(1)
+
+    # Background without changing aspect
+    ax_im = add_background_image(f, im)
+    assert_true(ax_im.get_aspect() == 'auto')
+    for ax in axs:
+        assert_true(ax.get_aspect() == 1)
+
+    # Background with changing aspect
+    ax_im_asp = add_background_image(f, im, set_ratios='auto')
+    assert_true(ax_im_asp.get_aspect() == 'auto')
+    for ax in axs:
+        assert_true(ax.get_aspect() == 'auto')
+
+
+run_tests_if_main()
diff --git a/mne/viz/topo.py b/mne/viz/topo.py
index b87a7c7..e847b0c 100644
--- a/mne/viz/topo.py
+++ b/mne/viz/topo.py
@@ -14,24 +14,19 @@ from itertools import cycle
 from functools import partial
 
 import numpy as np
-from scipy import ndimage
 
-# XXX : don't import pyplot here or you will break the doc
-
-from ..baseline import rescale
-from ..utils import deprecated
 from ..io.pick import channel_type, pick_types
 from ..fixes import normalize_colors
-from ..utils import _clean_names
+from ..utils import _clean_names, deprecated
 
-from .utils import _mutable_defaults, _check_delayed_ssp, COLORS
-from .utils import _draw_proj_checkbox
+from ..defaults import _handle_default
+from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
+                    add_background_image)
 
 
 def iter_topography(info, layout=None, on_pick=None, fig=None,
                     fig_facecolor='k', axis_facecolor='k',
-                    axis_spinecolor='k', layout_scale=None,
-                    colorbar=False):
+                    axis_spinecolor='k', layout_scale=None):
     """ Create iterator over channel positions
 
     This function returns a generator that unpacks into
@@ -81,7 +76,7 @@ def iter_topography(info, layout=None, on_pick=None, fig=None,
 
     fig.set_facecolor(fig_facecolor)
     if layout is None:
-        from ..layouts import find_layout
+        from ..channels import find_layout
         layout = find_layout(info)
 
     if on_pick is not None:
@@ -111,8 +106,9 @@ def iter_topography(info, layout=None, on_pick=None, fig=None,
 
 def _plot_topo(info=None, times=None, show_func=None, layout=None,
                decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
-               border='none', cmap=None, layout_scale=None, title=None,
-               x_label=None, y_label=None, vline=None):
+               border='none', axis_facecolor='k', fig_facecolor='k',
+               cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
+               y_label=None, vline=None, font_color='w'):
     """Helper function to plot on sensor layout"""
     import matplotlib.pyplot as plt
 
@@ -127,15 +123,17 @@ def _plot_topo(info=None, times=None, show_func=None, layout=None,
         norm = normalize_colors(vmin=vmin, vmax=vmax)
         sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
         sm.set_array(np.linspace(vmin, vmax))
-        ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
+        ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
         cb = fig.colorbar(sm, ax=ax)
         cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
-        plt.setp(cb_yticks, color='w')
+        plt.setp(cb_yticks, color=font_color)
+        ax.axis('off')
 
     my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
                                    fig=fig, layout_scale=layout_scale,
                                    axis_spinecolor=border,
-                                   colorbar=colorbar)
+                                   axis_facecolor=axis_facecolor,
+                                   fig_facecolor=fig_facecolor)
 
     for ax, ch_idx in my_topo_plot:
         if layout.kind == 'Vectorview-all' and ylim is not None:
@@ -151,7 +149,7 @@ def _plot_topo(info=None, times=None, show_func=None, layout=None,
             plt.ylim(*ylim_)
 
     if title is not None:
-        plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
+        plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
 
     return fig
 
@@ -184,23 +182,32 @@ def _plot_topo_onpick(event, show_func=None, colorbar=False):
         raise err
 
 
-def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
-                freq=None, vline=None, x_label=None, y_label=None,
-                colorbar=False, picker=True, cmap=None):
+def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
+                tfr=None, freq=None, vline=None, x_label=None, y_label=None,
+                colorbar=False, picker=True, cmap='RdBu_r', title=None):
     """ Aux function to show time-freq map on topo """
     import matplotlib.pyplot as plt
-    if cmap is None:
-        cmap = plt.cm.jet
-
+    from matplotlib.widgets import RectangleSelector
     extent = (tmin, tmax, freq[0], freq[-1])
-    ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
-              vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
-    if x_label is not None:
-        plt.xlabel(x_label)
-    if y_label is not None:
-        plt.ylabel(y_label)
+    img = ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
+                    vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
+    if isinstance(ax, plt.Axes):
+        if x_label is not None:
+            ax.set_xlabel(x_label)
+        if y_label is not None:
+            ax.set_ylabel(y_label)
+    else:
+        if x_label is not None:
+            plt.xlabel(x_label)
+        if y_label is not None:
+            plt.ylabel(y_label)
     if colorbar:
-        plt.colorbar()
+        plt.colorbar(mappable=img)
+    if title:
+        plt.title(title)
+    if not isinstance(ax, plt.Axes):
+        ax = plt.gca()
+    ax.RS = RectangleSelector(ax, onselect=onselect)  # reference must be kept
 
 
 def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
@@ -217,7 +224,8 @@ def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
         else:
             ax.plot(times, data_[ch_idx], color_)
     if vline:
-        [plt.axvline(x, color='w', linewidth=0.5) for x in vline]
+        for x in vline:
+            plt.axvline(x, color='w', linewidth=0.5)
     if x_label is not None:
         plt.xlabel(x_label)
     if y_label is not None:
@@ -228,12 +236,16 @@ def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
 
 def _check_vlim(vlim):
     """AUX function"""
-    return not np.isscalar(vlim) and not vlim is None
+    return not np.isscalar(vlim) and vlim is not None
 
 
+@deprecated("It will be removed in version 0.11. "
+            "Please use evoked.plot_topo or viz.evoked.plot_evoked_topo "
+            "for list of evoked instead.")
 def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
               border='none', ylim=None, scalings=None, title=None, proj=False,
-              vline=[0.0]):
+              vline=[0.0], fig_facecolor='k', fig_background=None,
+              axis_facecolor='k', font_color='w', show=True):
     """Plot 2D topography of evoked responses.
 
     Clicking on the plot of an individual sensor opens a new figure showing
@@ -256,28 +268,110 @@ def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
         automatically drawn.
     border : str
         matplotlib borders style to be used for each sensor plot.
+    ylim : dict | None
+        ylim for plots. The value determines the upper and lower subplot
+        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
+        mag, grad, misc. If None, the ylim parameter for each channel is
+        determined by the maximum absolute peak.
     scalings : dict | None
         The scalings of the channel types to be applied for plotting. If None,
         defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
+    proj : bool | 'interactive'
+        If true SSP projections are applied before display. If 'interactive',
+        a check box for reversible selection of SSP projection vectors will
+        be shown.
+    vline : list of floats | None
+        The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : Instance of matplotlib.figure.Figure
+        Images of evoked responses at sensor locations
+    """
+    return _plot_evoked_topo(evoked=evoked, layout=layout,
+                             layout_scale=layout_scale, color=color,
+                             border=border, ylim=ylim, scalings=scalings,
+                             title=title, proj=proj, vline=vline,
+                             fig_facecolor=fig_facecolor,
+                             fig_background=fig_background,
+                             axis_facecolor=axis_facecolor,
+                             font_color=font_color, show=show)
+
+
+def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
+                      border='none', ylim=None, scalings=None, title=None,
+                      proj=False, vline=[0.0], fig_facecolor='k',
+                      fig_background=None, axis_facecolor='k', font_color='w',
+                      show=True):
+    """Plot 2D topography of evoked responses.
+
+    Clicking on the plot of an individual sensor opens a new figure showing
+    the evoked response for the selected sensor.
+
+    Parameters
+    ----------
+    evoked : list of Evoked | Evoked
+        The evoked response to plot.
+    layout : instance of Layout | None
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout is
+        inferred from the data.
+    layout_scale: float
+        Scaling factor for adjusting the relative size of the layout
+        on the canvas
+    color : list of color objects | color object | None
+        Everything matplotlib accepts to specify colors. If not list-like,
+        the color specified will be repeated. If None, colors are
+        automatically drawn.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
     ylim : dict | None
         ylim for plots. The value determines the upper and lower subplot
         limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
         mag, grad, misc. If None, the ylim parameter for each channel is
         determined by the maximum absolute peak.
+    scalings : dict | None
+        The scalings of the channel types to be applied for plotting. If None,
+        defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    title : str
+        Title of the figure.
     proj : bool | 'interactive'
         If true SSP projections are applied before display. If 'interactive',
         a check box for reversible selection of SSP projection vectors will
         be shown.
-    title : str
-        Title of the figure.
     vline : list of floats | None
         The values at which to show a vertical line.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    fig_background : None | numpy ndarray
+        A background image for the figure. This must work with a call to
+        plt.imshow. Defaults to None.
+    axis_facecolor : str | obj
+        The face color to be used for each sensor plot. Defaults to black.
+    font_color : str | obj
+        The color of text in the colorbar and title. Defaults to white.
+    show : bool
+        Show figure if True.
 
     Returns
     -------
     fig : Instance of matplotlib.figure.Figure
         Images of evoked responses at sensor locations
     """
+    import matplotlib.pyplot as plt
 
     if not type(evoked) in (tuple, list):
         evoked = [evoked]
@@ -298,56 +392,63 @@ def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
         color = cycle([color])
 
     times = evoked[0].times
-    if not all([(e.times == times).all() for e in evoked]):
+    if not all((e.times == times).all() for e in evoked):
         raise ValueError('All evoked.times must be the same')
 
     info = evoked[0].info
     ch_names = evoked[0].ch_names
-    if not all([e.ch_names == ch_names for e in evoked]):
+    if not all(e.ch_names == ch_names for e in evoked):
         raise ValueError('All evoked.picks must be the same')
     ch_names = _clean_names(ch_names)
 
     if layout is None:
-        from ..layouts.layout import find_layout
+        from ..channels.layout import find_layout
         layout = find_layout(info)
 
     # XXX. at the moment we are committed to 1- / 2-sensor-types layouts
     chs_in_layout = set(layout.names) & set(ch_names)
     types_used = set(channel_type(info, ch_names.index(ch))
                      for ch in chs_in_layout)
+    # remove possible reference meg channels
+    types_used = set.difference(types_used, set(('ref_meg',)))
     # one check for all vendors
-    meg_types = ['mag'], ['grad'], ['mag', 'grad'],
-    is_meg = any(types_used == set(k) for k in meg_types)
+    meg_types = set(('mag', 'grad'))
+    is_meg = len(set.intersection(types_used, meg_types)) > 0
     if is_meg:
         types_used = list(types_used)[::-1]  # -> restore kwarg order
         picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
                  for kk in types_used]
     else:
         types_used_kwargs = dict((t, True) for t in types_used)
-        picks = [pick_types(info, meg=False, **types_used_kwargs)]
+        picks = [pick_types(info, meg=False, exclude=[], **types_used_kwargs)]
     assert isinstance(picks, list) and len(types_used) == len(picks)
 
-    scalings = _mutable_defaults(('scalings', scalings))[0]
+    scalings = _handle_default('scalings', scalings)
     evoked = [e.copy() for e in evoked]
     for e in evoked:
         for pick, t in zip(picks, types_used):
             e.data[pick] = e.data[pick] * scalings[t]
 
-    if proj is True and all([e.proj is not True for e in evoked]):
+    if proj is True and all(e.proj is not True for e in evoked):
         evoked = [e.apply_proj() for e in evoked]
     elif proj == 'interactive':  # let it fail early.
         for e in evoked:
             _check_delayed_ssp(e)
 
     if ylim is None:
-        set_ylim = lambda x: np.abs(x).max()
+        def set_ylim(x):
+            return np.abs(x).max()
         ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
         ymax = np.array(ylim_)
         ylim_ = (-ymax, ymax)
     elif isinstance(ylim, dict):
-        ylim_ = _mutable_defaults(('ylim', ylim))[0]
+        ylim_ = _handle_default('ylim', ylim)
         ylim_ = [ylim_[kk] for kk in types_used]
-        ylim_ = zip(*[np.array(yl) for yl in ylim_])
+        # extra unpack to avoid bug #1700
+        if len(ylim_) == 1:
+            ylim_ = ylim_[0]
+        else:
+            ylim_ = zip(*[np.array(yl) for yl in ylim_])
     else:
         raise ValueError('ylim must be None or a dict')
 
@@ -356,8 +457,13 @@ def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
 
     fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
                      decim=1, colorbar=False, ylim=ylim_, cmap=None,
-                     layout_scale=layout_scale, border=border, title=title,
-                     x_label='Time (s)', vline=vline)
+                     layout_scale=layout_scale, border=border,
+                     fig_facecolor=fig_facecolor, font_color=font_color,
+                     axis_facecolor=axis_facecolor,
+                     title=title, x_label='Time (s)', vline=vline)
+
+    if fig_background is not None:
+        add_background_image(fig, fig_background)
 
     if proj == 'interactive':
         for e in evoked:
@@ -367,6 +473,9 @@ def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
                       projs=evoked[0].info['projs'], fig=fig)
         _draw_proj_checkbox(None, params)
 
+    if show:
+        plt.show()
+
     return fig
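
Since plot_topo now only forwards to _plot_evoked_topo, callers can keep the old entry point or move to the method form and pick up the new styling keywords. A hedged usage sketch ('sample-ave.fif' is a placeholder, and the ylim values are merely illustrative):

    import mne

    evoked = mne.read_evokeds('sample-ave.fif', condition=0)
    fig = evoked.plot_topo(title='Auditory response',
                           fig_facecolor='w', axis_facecolor='w',
                           font_color='k', ylim=dict(mag=[-300, 300]))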
 
 
@@ -397,246 +506,17 @@ def _plot_update_evoked_topo(params, bools):
     fig.canvas.draw()
 
 
-@deprecated('`plot_topo_tfr` is deprecated and will be removed in '
-            'MNE 0.9. Use `plot_topo` method on TFR objects.')
-def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
-                  vmax=None, cmap='RdBu_r', layout_scale=0.945, title=None):
-    """Plot time-frequency data on sensor layout
-
-    Clicking on the time-frequency map of an individual sensor opens a
-    new figure showing the time-frequency map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the power
-    tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
-        The time-frequency data. Must have the same channels as Epochs.
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Minimum value mapped to upppermost color
-    cmap : instance of matplotlib.pyplot.colormap | str
-        Colors to be mapped to the values. Default 'RdBu_r'.
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Images of time-frequency data at sensor locations
-    """
-
-    if vmin is None:
-        vmin = tfr.min()
-    if vmax is None:
-        vmax = tfr.max()
-
-    if layout is None:
-        from ..layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq, cmap=cmap)
-
-    fig = _plot_topo(info=epochs.info, times=epochs.times,
-                     show_func=tfr_imshow, layout=layout, border='w',
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title,
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
-@deprecated('`plot_topo_power` is deprecated and will be removed in '
-            'MNE 0.9. Use `plot_topo` method on TFR objects.')
-def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
-                    mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
-                    cmap=None, layout_scale=0.945, dB=True, title=None):
-    """Plot induced power on sensor layout
-
-    Clicking on the induced power map of an individual sensor opens a
-    new figure showing the induced power map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the power
-    power : 3D-array
-        First return value from mne.time_frequency.induced_power
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    baseline : tuple or list of length 2
-        The time interval to apply rescaling / baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal to (None, None) all the time
-        interval is used.
-    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
-        Do baseline correction with ratio (power is divided by mean
-        power during baseline) or z-score (power is divided by standard
-        deviation of power during baseline after subtracting the mean,
-        power = [power - mean(power_baseline)] / std(power_baseline))
-        If None, baseline no correction will be performed.
-    decim : integer
-        Increment for selecting each nth time slice
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Minimum value mapped to upppermost color
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas
-    dB : bool
-        If True, log10 will be applied to the data.
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figure
-        Images of induced power at sensor locations
-    """
-    times = epochs.times[::decim].copy()
-    if mode is not None:
-        if baseline is None:
-            baseline = epochs.baseline
-        power = rescale(power.copy(), times, baseline, mode)
-    times *= 1e3
-    if dB:
-        power = 20 * np.log10(power)
-    if vmin is None:
-        vmin = power.min()
-    if vmax is None:
-        vmax = power.max()
-    if layout is None:
-        from ..layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
-
-    fig = _plot_topo(info=epochs.info, times=times,
-                     show_func=power_imshow, layout=layout, decim=decim,
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title, border='w',
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
-@deprecated('`plot_topo_phase_lock` is deprecated and will be removed in '
-            'MNE 0.9. Use `plot_topo` method on TFR objects.')
-def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
-                         mode='mean', decim=1, colorbar=True, vmin=None,
-                         vmax=None, cmap=None, layout_scale=0.945,
-                         title=None):
-    """Plot phase locking values (PLV) on sensor layout
-
-    Clicking on the PLV map of an individual sensor opens a new figure
-    showing the PLV map of the selected sensor.
-
-    Parameters
-    ----------
-    epochs : instance of Epochs
-        The epochs used to generate the phase locking value
-    phase_lock : 3D-array
-        Phase locking value, second return value from
-        mne.time_frequency.induced_power.
-    freq : array-like
-        Frequencies of interest as passed to induced_power
-    layout : instance of Layout | None
-        Layout instance specifying sensor positions (does not need to
-        be specified for Neuromag data). If possible, the correct layout is
-        inferred from the data.
-    baseline : tuple or list of length 2
-        The time interval to apply rescaling / baseline correction.
-        If None do not apply it. If baseline is (a, b)
-        the interval is between "a (s)" and "b (s)".
-        If a is None the beginning of the data is used
-        and if b is None then b is set to the end of the interval.
-        If baseline is equal to (None, None) all the time
-        interval is used.
-    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
-        Do baseline correction with ratio (phase is divided by mean
-        phase during baseline) or z-score (phase is divided by standard
-        deviation of phase during baseline after subtracting the mean,
-        phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
-        If None, baseline no correction will be performed.
-    decim : integer
-        Increment for selecting each nth time slice
-    colorbar : bool
-        If true, colorbar will be added to the plot
-    vmin : float
-        Minimum value mapped to lowermost color
-    vmax : float
-        Minimum value mapped to upppermost color
-    cmap : instance of matplotlib.pyplot.colormap
-        Colors to be mapped to the values
-    layout_scale : float
-        Scaling factor for adjusting the relative size of the layout
-        on the canvas.
-    title : str
-        Title of the figure.
-
-    Returns
-    -------
-    fig : Instance of matplotlib.figure.Figrue
-        Phase lock images at sensor locations
-    """
-    times = epochs.times[::decim] * 1e3
-    if mode is not None:
-        if baseline is None:
-            baseline = epochs.baseline
-        phase = rescale(phase.copy(), times, baseline, mode)
-    if vmin is None:
-        vmin = phase.min()
-    if vmax is None:
-        vmax = phase.max()
-    if layout is None:
-        from ..layouts.layout import find_layout
-        layout = find_layout(epochs.info)
-
-    phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
-
-    fig = _plot_topo(info=epochs.info, times=times,
-                     show_func=phase_imshow, layout=layout, decim=decim,
-                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
-                     layout_scale=layout_scale, title=title, border='w',
-                     x_label='Time (s)', y_label='Frequency (Hz)')
-
-    return fig
-
-
 def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
                      data=None, epochs=None, sigma=None,
                      order=None, scalings=None, vline=None,
-                     x_label=None, y_label=None, colorbar=False):
+                     x_label=None, y_label=None, colorbar=False,
+                     cmap='RdBu_r'):
     """Aux function to plot erfimage on sensor topography"""
-
+    from scipy import ndimage
     import matplotlib.pyplot as plt
     this_data = data[:, ch_idx, :].copy()
     ch_type = channel_type(epochs.info, ch_idx)
-    if not ch_type in scalings:
+    if ch_type not in scalings:
         raise KeyError('%s channel type not in scalings' % ch_type)
     this_data *= scalings[ch_type]
 
@@ -646,10 +526,12 @@ def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
     if order is not None:
         this_data = this_data[order]
 
-    this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
+    if sigma > 0.:
+        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
 
     ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
-              origin='lower', vmin=vmin, vmax=vmax, picker=True)
+              origin='lower', vmin=vmin, vmax=vmax, picker=True,
+              cmap=cmap, interpolation='nearest')
 
     if x_label is not None:
         plt.xlabel(x_label)
@@ -659,9 +541,11 @@ def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
         plt.colorbar()
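
Two details of the smoothing above are easy to miss: gaussian_filter1d runs along axis 0, i.e. across epochs rather than across time, and the new sigma > 0. guard makes the sigma=0. default a true no-op. In isolation:

    import numpy as np
    from scipy import ndimage

    rng = np.random.RandomState(0)
    data = rng.randn(40, 100)      # (n_epochs, n_times) image for one channel
    sigma = 3.
    if sigma > 0.:
        # smooth vertically, i.e. across neighboring epochs only
        data = ndimage.gaussian_filter1d(data, sigma=sigma, axis=0)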
 
 
-def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
-                           vmax=None, colorbar=True, order=None, cmap=None,
-                           layout_scale=.95, title=None, scalings=None):
+def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
+                           vmax=None, colorbar=True, order=None, cmap='RdBu_r',
+                           layout_scale=.95, title=None, scalings=None,
+                           border='none', fig_facecolor='k', font_color='w',
+                           show=True):
     """Plot Event Related Potential / Fields image on topographies
 
     Parameters
@@ -672,7 +556,7 @@ def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
         System specific sensor positions.
     sigma : float
         The standard deviation of the Gaussian smoothing to apply along
-        the epoch axis to apply in the image.
+        the epoch axis of the image. If 0., no smoothing is applied.
     vmin : float
         The min value in the image. The unit is uV for EEG channels,
         fT for magnetometers and fT/cm for gradiometers.
@@ -697,29 +581,42 @@ def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
     scalings : dict | None
         The scalings of the channel types to be applied for plotting. If
         None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
+    border : str
+        matplotlib borders style to be used for each sensor plot.
+    fig_facecolor : str | obj
+        The figure face color. Defaults to black.
+    font_color : str | obj
+        The color of tick labels in the colorbar. Defaults to white.
+    show : bool
+        Show figure if True.
 
     Returns
     -------
     fig : instance of matplotlib figure
         Figure distributing one image per channel across sensor topography.
     """
-    scalings = _mutable_defaults(('scalings', scalings))[0]
+    import matplotlib.pyplot as plt
+    scalings = _handle_default('scalings', scalings)
     data = epochs.get_data()
     if vmin is None:
         vmin = data.min()
     if vmax is None:
         vmax = data.max()
     if layout is None:
-        from ..layouts.layout import find_layout
+        from ..channels.layout import find_layout
         layout = find_layout(epochs.info)
 
     erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
-                         data=data, epochs=epochs, sigma=sigma)
+                         data=data, epochs=epochs, sigma=sigma,
+                         cmap=cmap)
 
     fig = _plot_topo(info=epochs.info, times=epochs.times,
                      show_func=erf_imshow, layout=layout, decim=1,
                      colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                      layout_scale=layout_scale, title=title,
-                     border='w', x_label='Time (s)', y_label='Epoch')
-
+                     fig_facecolor=fig_facecolor,
+                     font_color=font_color, border=border,
+                     x_label='Time (s)', y_label='Epoch')
+    if show:
+        plt.show()
     return fig
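
A hedged call sketch for the updated signature; it assumes an existing `epochs` object, and the vmin/vmax values are merely illustrative:

    from mne.viz import plot_topo_image_epochs

    fig = plot_topo_image_epochs(epochs, sigma=0., vmin=-250, vmax=250,
                                 colorbar=True, fig_facecolor='w',
                                 font_color='k', title='ERF images')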
diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py
index 5e66284..1be92dc 100644
--- a/mne/viz/topomap.py
+++ b/mne/viz/topomap.py
@@ -11,6 +11,7 @@ from __future__ import print_function
 
 import math
 import copy
+from functools import partial
 
 import numpy as np
 from scipy import linalg
@@ -18,17 +19,21 @@ from scipy import linalg
 from ..baseline import rescale
 from ..io.constants import FIFF
 from ..io.pick import pick_types
-from ..utils import _clean_names, deprecated
-from .utils import tight_layout, _setup_vmin_vmax, DEFAULTS
-from .utils import _prepare_trellis, _check_delayed_ssp
-from .utils import _draw_proj_checkbox
+from ..utils import _clean_names, _time_mask, verbose, logger
+from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
+                    _check_delayed_ssp, _draw_proj_checkbox, figure_nobar)
+from ..time_frequency import compute_epochs_psd
+from ..defaults import _handle_default
+from ..channels.layout import _find_topomap_coords
+from ..fixes import _get_argrelmax
 
 
-def _prepare_topo_plot(obj, ch_type, layout):
+def _prepare_topo_plot(inst, ch_type, layout):
     """"Aux Function"""
-    info = copy.deepcopy(obj.info)
+    info = copy.deepcopy(inst.info)
+
     if layout is None and ch_type != 'eeg':
-        from ..layouts.layout import find_layout
+        from ..channels import find_layout
         layout = find_layout(info)
     elif layout == 'auto':
         layout = None
@@ -40,7 +45,7 @@ def _prepare_topo_plot(obj, ch_type, layout):
     # special case for merging grad channels
     if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
             np.unique([ch['coil_type'] for ch in info['chs']])):
-        from ..layouts.layout import _pair_grad_sensors
+        from ..channels.layout import _pair_grad_sensors
         picks, pos = _pair_grad_sensors(info, layout)
         merge_grads = True
     else:
@@ -56,15 +61,28 @@ def _prepare_topo_plot(obj, ch_type, layout):
             raise ValueError("No channels of type %r" % ch_type)
 
         if layout is None:
-            chs = [info['chs'][i] for i in picks]
-            from ..layouts.layout import _find_topomap_coords
-            pos = _find_topomap_coords(chs, layout)
+            pos = _find_topomap_coords(info, picks)
         else:
             names = [n.upper() for n in layout.names]
-            pos = [layout.pos[names.index(info['ch_names'][k].upper())]
-                   for k in picks]
-
-    return picks, pos, merge_grads, info['ch_names']
+            pos = list()
+            for pick in picks:
+                this_name = info['ch_names'][pick].upper()
+                if this_name in names:
+                    pos.append(layout.pos[names.index(this_name)])
+                else:
+                    logger.warning('Failed to locate %s channel positions from'
+                                   ' layout. Inferring channel positions from '
+                                   'data.' % ch_type)
+                    pos = _find_topomap_coords(info, picks)
+                    break
+
+    ch_names = [info['ch_names'][k] for k in picks]
+    if merge_grads:
+        # change names so that vectorview combined grads appear as MEG014x
+        # instead of MEG0142 or MEG0143 which are the 2 planar grads.
+        ch_names = [ch_names[k][:-1] + 'x' for k in range(0, len(ch_names), 2)]
+    pos = np.array(pos)[:, :2]  # 2D plot, otherwise interpolation bugs
+    return picks, pos, merge_grads, ch_names, ch_type
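
The renaming above turns each Vectorview planar-gradiometer pair (channel names ending in 2 and 3) into one pseudo-channel ending in 'x'; stepping through the paired names two at a time does the trick. In isolation, with made-up names:

    ch_names = ['MEG 0112', 'MEG 0113', 'MEG 0122', 'MEG 0123']
    merged = [ch_names[k][:-1] + 'x' for k in range(0, len(ch_names), 2)]
    print(merged)  # -> ['MEG 011x', 'MEG 012x']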
 
 
 def _plot_update_evoked_topomap(params, bools):
@@ -81,7 +99,7 @@ def _plot_update_evoked_topomap(params, bools):
     data = new_evoked.data[np.ix_(params['picks'],
                                   params['time_idx'])] * params['scale']
     if params['merge_grads']:
-        from ..layouts.layout import _merge_grad_data
+        from ..channels.layout import _merge_grad_data
         data = _merge_grad_data(data)
     image_mask = params['image_mask']
 
@@ -100,9 +118,10 @@ def _plot_update_evoked_topomap(params, bools):
     params['fig'].canvas.draw()
 
 
-def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
+def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors=True,
                        colorbar=False, res=64, size=1, show=True,
-                       outlines='head', contours=6, image_interp='bilinear'):
+                       outlines='head', contours=6, image_interp='bilinear',
+                       axes=None):
     """Plot topographic maps of SSP projections
 
     Parameters
@@ -117,7 +136,8 @@ def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
         Colormap.
     sensors : bool | str
         Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
     colorbar : bool
         Plot a colorbar.
     res : int
@@ -126,27 +146,41 @@ def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
         Side length of the topomaps in inches (only applies when plotting
         multiple topomaps at a time).
     show : bool
-        Show figures if True
-    outlines : 'head' | dict | None
-        The outlines to be drawn. If 'head', a head scheme will be drawn. If
-        dict, each key refers to a tuple of x and y positions. The values in
-        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
         Defaults to 'head'.
     contours : int | False | None
         The number of contour lines to draw. If 0, no contours will be drawn.
     image_interp : str
         The image interpolation to be used. All matplotlib options are
         accepted.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of
+        the same length as the number of projectors. If instance of Axes,
+        there must be only one projector. Defaults to None.
 
     Returns
     -------
     fig : instance of matplotlib figure
         Figure distributing one image per channel across sensor topography.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
     """
     import matplotlib.pyplot as plt
 
     if layout is None:
-        from ..layouts import read_layout
+        from ..channels import read_layout
         layout = read_layout('Vectorview-all')
 
     if not isinstance(layout, list):
@@ -156,10 +190,18 @@ def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
     nrows = math.floor(math.sqrt(n_projs))
     ncols = math.ceil(n_projs / nrows)
 
-    fig = plt.gcf()
-    fig.clear()
-    for k, proj in enumerate(projs):
-
+    if axes is None:
+        plt.figure()
+        axes = list()
+        for idx in range(len(projs)):
+            ax = plt.subplot(nrows, ncols, idx + 1)
+            axes.append(ax)
+    elif isinstance(axes, plt.Axes):
+        axes = [axes]
+    if len(axes) != len(projs):
+        raise RuntimeError('There must be an axes for each picked projector.')
+    for proj_idx, proj in enumerate(projs):
+        axes[proj_idx].set_title(proj['desc'][:10] + '...')
         ch_names = _clean_names(proj['data']['col_names'])
         data = proj['data']['data'].ravel()
 
@@ -167,7 +209,7 @@ def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
         for l in layout:
             is_vv = l.kind.startswith('Vectorview')
             if is_vv:
-                from ..layouts.layout import _pair_grad_sensors_from_ch_names
+                from ..channels.layout import _pair_grad_sensors_from_ch_names
                 grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
                 if grad_pairs:
                     ch_names = [ch_names[i] for i in grad_pairs]
@@ -178,40 +220,50 @@ def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
 
             pos = l.pos[idx]
             if is_vv and grad_pairs:
-                from ..layouts.layout import _merge_grad_data
+                from ..channels.layout import _merge_grad_data
                 shape = (len(idx) / 2, 2, -1)
                 pos = pos.reshape(shape).mean(axis=1)
                 data = _merge_grad_data(data[grad_pairs]).ravel()
 
             break
 
-        ax = plt.subplot(nrows, ncols, k + 1)
-        ax.set_title(proj['desc'][:10] + '...')
         if len(idx):
-            plot_topomap(data, pos, vmax=None, cmap=cmap,
-                         sensors=sensors, res=res, outlines=outlines,
-                         contours=contours, image_interp=image_interp)
+            plot_topomap(data, pos[:, :2], vmax=None, cmap=cmap,
+                         sensors=sensors, res=res, axis=axes[proj_idx],
+                         outlines=outlines, contours=contours,
+                         image_interp=image_interp, show=False)
             if colorbar:
                 plt.colorbar()
         else:
             raise RuntimeError('Cannot find a proper layout for projection %s'
                                % proj['desc'])
-    fig = ax.get_figure()
+    tight_layout(fig=axes[0].get_figure())
     if show and plt.get_backend() != 'agg':
-        fig.show()
-    tight_layout(fig=fig)
+        plt.show()
 
-    return fig
+    return axes[0].get_figure()
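
With the new `axes` parameter the projector maps can be embedded in an existing figure instead of taking over plt.gcf(). A hedged sketch ('sample-proj.fif' is a placeholder, and more than one projector is assumed so plt.subplots returns an array):

    import matplotlib.pyplot as plt
    import mne

    projs = mne.read_proj('sample-proj.fif')
    fig, axes = plt.subplots(1, len(projs))
    mne.viz.plot_projs_topomap(projs, axes=list(axes), show=False)
    fig.suptitle('SSP projectors')
    plt.show()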
 
 
-def _check_outlines(pos, outlines, head_scale=0.85):
+def _check_outlines(pos, outlines, head_pos=None):
     """Check or create outlines for topoplot
     """
-    pos = np.asarray(pos)
-    if outlines in ('head', None):
+    pos = np.array(pos, float)[:, :2]  # ensure we have a copy
+    head_pos = dict() if head_pos is None else head_pos
+    if not isinstance(head_pos, dict):
+        raise TypeError('head_pos must be dict or None')
+    head_pos = copy.deepcopy(head_pos)
+    for key in head_pos.keys():
+        if key not in ('center', 'scale'):
+            raise KeyError('head_pos must only contain "center" and '
+                           '"scale"')
+        head_pos[key] = np.array(head_pos[key], float)
+        if head_pos[key].shape != (2,):
+            raise ValueError('head_pos["%s"] must have shape (2,), not '
+                             '%s' % (key, head_pos[key].shape))
+
+    if outlines in ('head', 'skirt', None):
         radius = 0.5
-        step = 2 * np.pi / 101
-        l = np.arange(0, 2 * np.pi + step, step)
+        l = np.linspace(0, 2 * np.pi, 101)
         head_x = np.cos(l) * radius
         head_y = np.sin(l) * radius
         nose_x = np.array([0.18, 0, -0.18]) * radius
@@ -220,23 +272,46 @@ def _check_outlines(pos, outlines, head_scale=0.85):
                          .532, .510, .489])
         ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
                           -.1313, -.1384, -.1199])
-        x, y = pos[:, :2].T
-        x_range = np.abs(x.max() - x.min())
-        y_range = np.abs(y.max() - y.min())
 
         # shift and scale the electrode positions
-        pos[:, 0] = head_scale * ((pos[:, 0] - x.min()) / x_range - 0.5)
-        pos[:, 1] = head_scale * ((pos[:, 1] - y.min()) / y_range - 0.5)
+        if 'center' not in head_pos:
+            head_pos['center'] = 0.5 * (pos.max(axis=0) + pos.min(axis=0))
+        pos -= head_pos['center']
 
-        # Define the outline of the head, ears and nose
         if outlines is not None:
-            outlines = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
-                            ear_left=(ear_x,  ear_y),
-                            ear_right=(-ear_x,  ear_y))
+            # Define the outline of the head, ears and nose
+            outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
+                                 ear_left=(ear_x, ear_y),
+                                 ear_right=(-ear_x, ear_y))
+        else:
+            outlines_dict = dict()
+
+        if outlines == 'skirt':
+            if 'scale' not in head_pos:
+                # By default, fit electrodes inside the head circle
+                head_pos['scale'] = 1.0 / (pos.max(axis=0) - pos.min(axis=0))
+            pos *= head_pos['scale']
+
+            # Make the figure encompass slightly more than all points
+            mask_scale = 1.25 * (pos.max(axis=0) - pos.min(axis=0))
+
+            outlines_dict['autoshrink'] = False
+            outlines_dict['mask_pos'] = (mask_scale[0] * head_x,
+                                         mask_scale[1] * head_y)
+            outlines_dict['clip_radius'] = (mask_scale / 2.)
         else:
-            outlines = dict()
+            if 'scale' not in head_pos:
+                # The default is to make the points occupy a slightly smaller
+                # proportion (0.85) of the total width and height
+                # this number was empirically determined (seems to work well)
+                head_pos['scale'] = 0.85 / (pos.max(axis=0) - pos.min(axis=0))
+            pos *= head_pos['scale']
+            outlines_dict['autoshrink'] = True
+            outlines_dict['mask_pos'] = head_x, head_y
+            outlines_dict['clip_radius'] = (0.5, 0.5)
+
+        outlines = outlines_dict
 
-        outlines['mask_pos'] = head_x, head_y
     elif isinstance(outlines, dict):
         if 'mask_pos' not in outlines:
             raise ValueError('You must specify the coordinates of the image'
@@ -247,28 +322,6 @@ def _check_outlines(pos, outlines, head_scale=0.85):
     return pos, outlines
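
The new `head_pos` argument replaces the fixed 0.85 scaling with explicit, optional 'center' and 'scale' entries. A hedged sketch against fake sensor positions:

    import numpy as np
    from mne.viz import plot_topomap

    rng = np.random.RandomState(42)
    pos = rng.rand(32, 2) - 0.5        # fake 2D sensor positions
    data = rng.randn(32)
    # recenter the head at the origin and stretch the sensor cloud a bit
    plot_topomap(data, pos, outlines='skirt',
                 head_pos=dict(center=(0., 0.), scale=(1.2, 1.2)))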
 
 
-def _inside_contour(pos, contour):
-    """Aux function"""
-    npos, ncnt = len(pos), len(contour)
-    x, y = pos[:, :2].T
-
-    check_mask = np.ones((npos), dtype=bool)
-    check_mask[((x < np.min(x)) | (y < np.min(y)) |
-                (x > np.max(x)) | (y > np.max(y)))] = False
-
-    critval = 0.1
-    sel = np.where(check_mask)[0]
-    for this_sel in sel:
-        contourx = contour[:, 0] - pos[this_sel, 0]
-        contoury = contour[:, 1] - pos[this_sel, 1]
-        angle = np.arctan2(contoury, contourx)
-        angle = np.unwrap(angle)
-        total = np.sum(np.diff(angle))
-        check_mask[this_sel] = np.abs(total) > critval
-
-    return check_mask
-
-
 def _griddata(x, y, v, xi, yi):
     """Aux function"""
     xy = x.ravel() + y.ravel() * -1j
@@ -301,10 +354,21 @@ def _griddata(x, y, v, xi, yi):
     return zi
 
 
-def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
+def _plot_sensors(pos_x, pos_y, sensors, ax):
+    """Aux function"""
+    from matplotlib.patches import Circle
+    if sensors is True:
+        for x, y in zip(pos_x, pos_y):
+            ax.add_artist(Circle(xy=(x, y), radius=0.003, color='k'))
+    else:
+        ax.plot(pos_x, pos_y, sensors)
+
+
+def plot_topomap(data, pos, vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
                  res=64, axis=None, names=None, show_names=False, mask=None,
                  mask_params=None, outlines='head', image_mask=None,
-                 contours=6, image_interp='bilinear'):
+                 contours=6, image_interp='bilinear', show=True,
+                 head_pos=None, onselect=None):
     """Plot a topographic map as image
 
     Parameters
@@ -313,20 +377,20 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
         The data values to plot.
     pos : array, shape = (n_points, 2)
         For each data point, the x and y coordinates.
-    vmin : float | callable
-        The value specfying the lower bound of the color range.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
         If None, and vmax is None, -vmax is used. Else np.min(data).
-        If callable, the output equals vmin(data).
-    vmax : float | callable
-        The value specfying the upper bound of the color range.
-        If None, the maximum absolute value is used. If vmin is None,
-        but vmax is not, defaults to np.min(data).
-        If callable, the output equals vmax(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
     cmap : matplotlib colormap
         Colormap.
     sensors : bool | str
         Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
     res : int
         The resolution of the topomap image (n pixels along each side).
     axis : instance of Axis | None
@@ -344,22 +408,41 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
         Indices set to `True` will be considered. Defaults to None.
     mask_params : dict | None
         Additional plotting parameters for plotting significant sensors.
-        Default (None) equals:
-        dict(marker='o', markerfacecolor='w', markeredgecolor='k', linewidth=0,
-             markersize=4)
-    outlines : 'head' | dict | None
-        The outlines to be drawn. If 'head', a head scheme will be drawn. If
-        dict, each key refers to a tuple of x and y positions. The values in
-        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Default (None) equals::
+
+           dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                linewidth=0, markersize=4)
+
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions due to points outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
         Defaults to 'head'.
     image_mask : ndarray of bool, shape (res, res) | None
         The image mask to cover the interpolated surface. If None, it will be
         computed from the outline.
-    contour : int | False | None
+    contours : int | False | None
         The number of contour lines to draw. If 0, no contours will be drawn.
     image_interp : str
         The image interpolation to be used. All matplotlib options are
         accepted.
+    show : bool
+        Show figure if True.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+    onselect : callable | None
+        Handle for a function that is called when the user selects a set of
+        channels by rectangle selection (matplotlib ``RectangleSelector``). If
+        None, interactive selection is disabled. Defaults to None.
 
     Returns
     -------
@@ -369,28 +452,47 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
         The fieldlines.
     """
     import matplotlib.pyplot as plt
+    from matplotlib.widgets import RectangleSelector
 
     data = np.asarray(data)
     if data.ndim > 1:
-        err = ("Data needs to be array of shape (n_sensors,); got shape "
-               "%s." % str(data.shape))
-        raise ValueError(err)
-    elif len(data) != len(pos):
-        err = ("Data and pos need to be of same length. Got data of shape %s, "
-               "pos of shape %s." % (str(), str()))
-
-    axes = plt.gca()
-    axes.set_frame_on(False)
+        raise ValueError("Data needs to be array of shape (n_sensors,); got "
+                         "shape %s." % str(data.shape))
+
+    # Give a helpful error message for common mistakes regarding the position
+    # matrix.
+    pos_help = ("Electrode positions should be specified as a 2D array with "
+                "shape (n_channels, 2). Each row in this matrix contains the "
+                "(x, y) position of an electrode.")
+    if pos.ndim != 2:
+        error = ("{ndim}D array supplied as electrode positions, where a 2D "
+                 "array was expected").format(ndim=pos.ndim)
+        raise ValueError(error + " " + pos_help)
+    elif pos.shape[1] == 3:
+        error = ("The supplied electrode positions matrix contains 3 columns. "
+                 "Are you trying to specify XYZ coordinates? Perhaps the "
+                 "mne.channels.create_eeg_layout function is useful for you.")
+        raise ValueError(error + " " + pos_help)
+    # No error is raised in case of pos.shape[1] == 4. In this case, it is
+    # assumed the position matrix contains both (x, y) and (width, height)
+    # values, such as Layout.pos.
+    elif pos.shape[1] == 1 or pos.shape[1] > 4:
+        raise ValueError(pos_help)
+
+    if len(data) != len(pos):
+        raise ValueError("Data and pos need to be of same length. Got data of "
+                         "length %s, pos of length %s" % (len(data), len(pos)))
 
     vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
 
-    plt.xticks(())
-    plt.yticks(())
-    pos, outlines = _check_outlines(pos, outlines)
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
     pos_x = pos[:, 0]
     pos_y = pos[:, 1]
 
     ax = axis if axis else plt.gca()
+    ax.set_xticks([])
+    ax.set_yticks([])
+    ax.set_frame_on(False)
     if any([not pos_y.any(), not pos_x.any()]):
         raise RuntimeError('No position information found, cannot compute '
                            'geometries for topomap.')
@@ -401,10 +503,10 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
         xlim = np.inf, -np.inf,
         ylim = np.inf, -np.inf,
         mask_ = np.c_[outlines['mask_pos']]
-        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0] * 1.01]),
-                      np.max(np.r_[xlim[1], mask_[:, 0] * 1.01]))
-        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1] * 1.01]),
-                      np.max(np.r_[ylim[1], mask_[:, 1] * 1.01]))
+        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
+                      np.max(np.r_[xlim[1], mask_[:, 0]]))
+        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
+                      np.max(np.r_[ylim[1], mask_[:, 1]]))
 
     # interpolate data
     xi = np.linspace(xmin, xmax, res)
@@ -415,36 +517,29 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
     if outlines is None:
         _is_default_outlines = False
     elif isinstance(outlines, dict):
-        _is_default_outlines = any([k.startswith('head') for k in outlines])
+        _is_default_outlines = any(k.startswith('head') for k in outlines)
 
     if _is_default_outlines and image_mask is None:
         # prepare masking
         image_mask, pos = _make_image_mask(outlines, pos, res)
 
-    if image_mask is not None and not _is_default_outlines:
-        Zi[~image_mask] = np.nan
+    mask_params = _handle_default('mask_params', mask_params)
 
-    if mask_params is None:
-        mask_params = DEFAULTS['mask_params'].copy()
-    elif isinstance(mask_params, dict):
-        params = dict((k, v) for k, v in DEFAULTS['mask_params'].items()
-                      if k not in mask_params)
-        mask_params.update(params)
-    else:
-        raise ValueError('`mask_params` must be of dict-type '
-                         'or None')
+    # plot outline
+    linewidth = mask_params['markeredgewidth']
+    patch = None
+    if 'patch' in outlines:
+        patch = outlines['patch']
+        patch_ = patch() if callable(patch) else patch
+        patch_.set_clip_on(False)
+        ax.add_patch(patch_)
+        ax.set_transform(ax.transAxes)
+        ax.set_clip_path(patch_)
 
     # plot map and contour
     im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                    aspect='equal', extent=(xmin, xmax, ymin, ymax),
                    interpolation=image_interp)
-    # plot outline
-    linewidth = mask_params['markeredgewidth']
-    if isinstance(outlines, dict):
-        for k, (x, y) in outlines.items():
-            if 'mask' in k:
-                continue
-            ax.plot(x, y, color='k', linewidth=linewidth)
 
     # This tackles an incomprehensible matplotlib bug if no contours are
     # drawn. To avoid rescalings, we will always draw contours.
@@ -460,39 +555,57 @@ def plot_topomap(data, pos, vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
 
     if _is_default_outlines:
         from matplotlib import patches
-        # remove nose offset and tweak
-        patch = patches.Circle((0.5, 0.4687), radius=.46,
-                               clip_on=True,
-                               transform=ax.transAxes)
-        im.set_clip_path(patch)
-        ax.set_clip_path(patch)
+        patch_ = patches.Ellipse((0, 0),
+                                 2 * outlines['clip_radius'][0],
+                                 2 * outlines['clip_radius'][1],
+                                 clip_on=True,
+                                 transform=ax.transData)
+    if _is_default_outlines or patch is not None:
+        im.set_clip_path(patch_)
+        # ax.set_clip_path(patch_)
         if cont is not None:
             for col in cont.collections:
-                col.set_clip_path(patch)
+                col.set_clip_path(patch_)
 
-    if sensors is True:
-        sensors = 'k,'
-    if sensors and mask is None:
-        ax.plot(pos_x, pos_y, sensors)
+    if sensors is not False and mask is None:
+        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
     elif sensors and mask is not None:
         idx = np.where(mask)[0]
         ax.plot(pos_x[idx], pos_y[idx], **mask_params)
         idx = np.where(~mask)[0]
-        ax.plot(pos_x[idx], pos_y[idx], sensors)
+        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
+    elif not sensors and mask is not None:
+        idx = np.where(mask)[0]
+        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
+
+    if isinstance(outlines, dict):
+        outlines_ = dict([(k, v) for k, v in outlines.items() if k not in
+                          ['patch', 'autoshrink']])
+        for k, (x, y) in outlines_.items():
+            if 'mask' in k:
+                continue
+            ax.plot(x, y, color='k', linewidth=linewidth, clip_on=False)
 
     if show_names:
         if show_names is True:
-            show_names = lambda x: x
+            def _show_names(x):
+                return x
+        else:
+            _show_names = show_names
         show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
         for ii, (p, ch_id) in enumerate(zip(pos, names)):
             if ii not in show_idx:
                 continue
-            ch_id = show_names(ch_id)
+            ch_id = _show_names(ch_id)
             ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                     verticalalignment='center', size='x-small')
 
     plt.subplots_adjust(top=.95)
 
+    if onselect is not None:
+        ax.RS = RectangleSelector(ax, onselect=onselect)
+    if show:
+        plt.show()
     return im, cont
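
The `onselect` hook wires a RectangleSelector to the finished topomap; what to do with the selection is left entirely to the caller. A hedged sketch with synthetic data (the callback name is ours):

    import numpy as np
    from mne.viz import plot_topomap

    def on_select(eclick, erelease):
        # events delimiting the dragged rectangle, in data coordinates
        print('x from %.2f to %.2f' % (eclick.xdata, erelease.xdata))

    rng = np.random.RandomState(0)
    pos = rng.rand(20, 2) - 0.5
    im, cont = plot_topomap(rng.randn(20), pos, onselect=on_select)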
 
 
@@ -506,14 +619,16 @@ def _make_image_mask(outlines, pos, res):
     ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
                   np.max(np.r_[-np.inf, mask_[:, 1]]))
 
-    inside = _inside_contour(pos, mask_)
-    outside = np.invert(inside)
-    outlier_points = pos[outside]
-    while np.any(outlier_points):  # auto shrink
-        pos *= 0.99
+    if outlines.get('autoshrink', False) is not False:
         inside = _inside_contour(pos, mask_)
         outside = np.invert(inside)
         outlier_points = pos[outside]
+        while np.any(outlier_points):  # auto shrink
+            pos *= 0.99
+            inside = _inside_contour(pos, mask_)
+            outside = np.invert(inside)
+            outlier_points = pos[outside]
+
     image_mask = np.zeros((res, res), dtype=bool)
     xi_mask = np.linspace(xmin, xmax, res)
     yi_mask = np.linspace(ymin, ymax, res)
@@ -526,24 +641,33 @@ def _make_image_mask(outlines, pos, res):
     return image_mask, pos
 
 
-@deprecated('`plot_ica_topomap` is deprecated and will be removed in '
-            'MNE 1.0. Use `plot_ica_components` instead')
-def plot_ica_topomap(ica, source_idx, ch_type='mag', res=64, layout=None,
-                     vmax=None, cmap='RdBu_r', sensors='k,', colorbar=True,
-                     show=True):
-    """This functoin is deprecated
+def _inside_contour(pos, contour):
+    """Aux function"""
+    npos = len(pos)
+    x, y = pos[:, :2].T
 
-    See ``plot_ica_components``.
-    """
-    return plot_ica_components(ica, source_idx, ch_type, res, layout,
-                               vmax, cmap, sensors, colorbar)
+    check_mask = np.ones((npos), dtype=bool)
+    check_mask[((x < np.min(x)) | (y < np.min(y)) |
+                (x > np.max(x)) | (y > np.max(y)))] = False
 
+    critval = 0.1
+    sel = np.where(check_mask)[0]
+    for this_sel in sel:
+        contourx = contour[:, 0] - pos[this_sel, 0]
+        contoury = contour[:, 1] - pos[this_sel, 1]
+        angle = np.arctan2(contoury, contourx)
+        angle = np.unwrap(angle)
+        total = np.sum(np.diff(angle))
+        check_mask[this_sel] = np.abs(total) > critval
+
+    return check_mask
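
_inside_contour is a winding-angle point-in-polygon test: summing the change in angle from a candidate point to successive contour vertices gives roughly +/-2*pi when the contour wraps around the point and roughly 0 otherwise, so comparing against a small threshold decides membership. The same idea stand-alone:

    import numpy as np

    def winding_inside(point, contour):
        # angle from the point to every vertex of the closed contour
        ang = np.arctan2(contour[:, 1] - point[1], contour[:, 0] - point[0])
        total = np.sum(np.diff(np.unwrap(ang)))
        return np.abs(total) > 0.1    # ~2*pi inside, ~0 outside

    theta = np.linspace(0, 2 * np.pi, 101)
    circle = np.c_[np.cos(theta), np.sin(theta)]
    print(winding_inside((0., 0.), circle))   # True
    print(winding_inside((2., 0.), circle))   # False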
 
-def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
+
+def plot_ica_components(ica, picks=None, ch_type=None, res=64,
                         layout=None, vmin=None, vmax=None, cmap='RdBu_r',
-                        sensors='k,', colorbar=False, title=None,
+                        sensors=True, colorbar=False, title=None,
                         show=True, outlines='head', contours=6,
-                        image_interp='bilinear'):
+                        image_interp='bilinear', head_pos=None):
     """Project unmixing matrix on interpolated sensor topogrpahy.
 
     Parameters
@@ -553,43 +677,57 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
     picks : int | array-like | None
         The indices of the sources to be plotted.
         If None all are plotted in batches of 20.
-    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
         The channel type to plot. For 'grad', the gradiometers are
         collected in pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    res : int
+        The resolution of the topomap image (n pixels along each side).
     layout : None | Layout
         Layout instance specifying sensor positions (does not need to
         be specified for Neuromag data). If possible, the correct layout is
         inferred from the data.
-    vmin : float | callable
-        The value specfying the lower bound of the color range.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
         If None, and vmax is None, -vmax is used. Else np.min(data).
-        If callable, the output equals vmin(data).
-    vmax : float | callable
-        The value specfying the upper bound of the color range.
-        If None, the maximum absolute value is used. If vmin is None,
-        but vmax is not, defaults to np.min(data).
-        If callable, the output equals vmax(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
     cmap : matplotlib colormap
         Colormap.
     sensors : bool | str
         Add markers for sensor locations to the plot. Accepts matplotlib
-        plot format string (e.g., 'r+' for red plusses).
+        plot format string (e.g., 'r+' for red plusses). If True, a circle
+        will be used (via .add_artist). Defaults to True.
     colorbar : bool
         Plot a colorbar.
-    res : int
-        The resolution of the topomap image (n pixels along each side).
+    title : str | None
+        Title to use.
     show : bool
-        Call pyplot.show() at the end.
-    outlines : 'head' | dict | None
-            The outlines to be drawn. If 'head', a head scheme will be drawn.
-            If dict, each key refers to a tuple of x and y positions. The
-            values in 'mask_pos' will serve as image mask. If None,
-            nothing will be drawn. defaults to 'head'.
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions until none falls outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
     contours : int | False | None
         The number of contour lines to draw. If 0, no contours will be drawn.
     image_interp : str
         The image interpolation to be used. All matplotlib options are
         accepted.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
 
     Returns
     -------
@@ -598,8 +736,10 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
     """
     import matplotlib.pyplot as plt
     from mpl_toolkits.axes_grid import make_axes_locatable
+    from ..channels import _get_ch_type
 
     if picks is None:  # plot components by sets of 20
+        ch_type = _get_ch_type(ica, ch_type)
         n_components = ica.mixing_matrix_.shape[1]
         p = 20
         figs = []
@@ -616,6 +756,7 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
         return figs
     elif np.isscalar(picks):
         picks = [picks]
+    ch_type = _get_ch_type(ica, ch_type)
 
     data = np.dot(ica.mixing_matrix_[:, picks].T,
                   ica.pca_components_[:ica.n_components_])
@@ -624,9 +765,9 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
         raise RuntimeError('The ICA\'s measurement info is missing. Please '
                            'fit the ICA or add the corresponding info object.')
 
-    data_picks, pos, merge_grads, names = _prepare_topo_plot(ica, ch_type,
-                                                             layout)
-    pos, outlines = _check_outlines(pos, outlines)
+    data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type,
+                                                                layout)
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
     if outlines not in (None, 'head'):
         image_mask, pos = _make_image_mask(outlines, pos, res)
     else:
@@ -642,7 +783,7 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
     fig.suptitle(title)
 
     if merge_grads:
-        from ..layouts.layout import _merge_grad_data
+        from ..channels.layout import _merge_grad_data
     for ii, data_, ax in zip(picks, data, axes):
         ax.set_title('IC #%03d' % ii, fontsize=12)
         data_ = _merge_grad_data(data_) if merge_grads else data_
@@ -650,7 +791,7 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
         im = plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
                           res=res, axis=ax, cmap=cmap, outlines=outlines,
                           image_mask=image_mask, contours=contours,
-                          image_interp=image_interp)[0]
+                          image_interp=image_interp, show=False)[0]
         if colorbar:
             divider = make_axes_locatable(ax)
             cax = divider.append_axes("right", size="5%", pad=0.05)
@@ -671,10 +812,11 @@ def plot_ica_components(ica, picks=None, ch_type='mag', res=64,
 
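
For reference, a hypothetical call exercising the new ``outlines`` dict
documented above (``ica`` is assumed to be an already fitted ICA instance;
with the built-in 'head' or 'skirt' schemes, ``head_pos`` likewise takes a
dict such as ``dict(center=(0., 0.), scale=(1., 1.))``)::

    import numpy as np
    from mne.viz import plot_ica_components

    circ = np.linspace(0, 2 * np.pi, 100)
    outlines = dict(head=(np.cos(circ), np.sin(circ)),
                    mask_pos=(np.cos(circ), np.sin(circ)),
                    autoshrink=True)
    fig = plot_ica_components(ica, picks=list(range(5)), ch_type='mag',
                              outlines=outlines)
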
 
 def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
-                     ch_type='mag', baseline=None, mode='mean', layout=None,
-                     vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
-                     colorbar=True, unit=None, res=64, size=2, format='%1.1e',
-                     show_names=False, title=None, axes=None, show=True):
+                     ch_type=None, baseline=None, mode='mean', layout=None,
+                     vmin=None, vmax=None, cmap=None, sensors=True,
+                     colorbar=True, unit=None, res=64, size=2,
+                     cbar_fmt='%1.1e', show_names=False, title=None,
+                     axes=None, show=True, outlines='head', head_pos=None):
     """Plot topographic maps of specific time-frequency intervals of TFR data
 
     Parameters
@@ -693,9 +835,10 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
     fmax : None | float
         The last frequency to display. If None the last frequency
         available is used.
-    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
         The channel type to plot. For 'grad', the gradiometers are
         collected in pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
     baseline : tuple or list of length 2
         The time interval to apply rescaling / baseline correction.
         If None do not apply it. If baseline is (a, b)
@@ -716,21 +859,23 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
         file is inferred from the data; if no appropriate layout file
         was found, the layout is automatically generated from the sensor
         locations.
-    vmin : float | callable
-        The value specfying the lower bound of the color range.
-        If None, and vmax is None, -vmax is used. Else np.min(data).
-        If callable, the output equals vmin(data).
-    vmax : float | callable
-        The value specfying the upper bound of the color range.
-        If None, the maximum absolute value is used. If vmin is None,
-        but vmax is not, defaults to np.min(data).
-        If callable, the output equals vmax(data).
-    cmap : matplotlib colormap
-        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
-        'Reds'.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None, and vmax is None, -vmax is used. Else np.min(data), or 0 if
+        the data contains only positive values. If callable, the output equals
+        vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range. If None, the
+        maximum value is used. If callable, the output equals vmax(data).
+        Defaults to None.
+    cmap : matplotlib colormap | None
+        Colormap. If None and the plotted data is all positive, defaults to
+        'Reds'. If None and data contains also negative values, defaults to
+        'RdBu_r'. Defaults to None.
     sensors : bool | str
         Add markers for sensor locations to the plot. Accepts matplotlib
-        plot format string (e.g., 'r+' for red plusses).
+        plot format string (e.g., 'r+' for red plusses). If True, a circle will
+        be used (via .add_artist). Defaults to True.
     colorbar : bool
         Plot a colorbar.
     unit : str | None
@@ -739,7 +884,7 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
         The resolution of the topomap image (n pixels along each side).
     size : float
         Side length per topomap in inches.
-    format : str
+    cbar_fmt : str
         String format for colorbar values.
     show_names : bool | callable
         If True, show channel names on top of the map. If a callable is
@@ -752,18 +897,36 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
     axes : instance of Axis | None
         The axes to plot to. If None the axes is defined automatically.
     show : bool
-        Call pyplot.show() at the end.
+        Show figure if True.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions until none falls outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
 
     Returns
     -------
     fig : matplotlib.figure.Figure
         The figure containing the topography.
     """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(tfr, ch_type)
     import matplotlib.pyplot as plt
     from mpl_toolkits.axes_grid1 import make_axes_locatable
 
-    picks, pos, merge_grads, names = _prepare_topo_plot(tfr, ch_type,
-                                                        layout)
+    picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type,
+                                                           layout)
     if not show_names:
         names = None
 
@@ -774,26 +937,31 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
 
     # crop time
     itmin, itmax = None, None
+    idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
     if tmin is not None:
-        itmin = np.where(tfr.times >= tmin)[0][0]
+        itmin = idx[0]
     if tmax is not None:
-        itmax = np.where(tfr.times <= tmax)[0][-1]
+        itmax = idx[-1] + 1
 
     # crop freqs
     ifmin, ifmax = None, None
+    idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
     if fmin is not None:
-        ifmin = np.where(tfr.freqs >= fmin)[0][0]
+        ifmin = idx[0]
     if fmax is not None:
-        ifmax = np.where(tfr.freqs <= fmax)[0][-1]
+        ifmax = idx[-1] + 1
 
     data = data[picks, ifmin:ifmax, itmin:itmax]
     data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
 
     if merge_grads:
-        from ..layouts.layout import _merge_grad_data
+        from ..channels.layout import _merge_grad_data
         data = _merge_grad_data(data)
 
-    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+    norm = np.min(data) >= 0
+    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
+    if cmap is None:
+        cmap = 'Reds' if norm else 'RdBu_r'
 
     if axes is None:
         fig = plt.figure()
@@ -808,15 +976,21 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
 
     if title is not None:
         ax.set_title(title)
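+    # wrap the TFR figure in a list so the selection callback below can
+    # create it lazily and replace it after its window has been closed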
+    fig_wrapper = list()
+    selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
+                                 itmin=itmin, itmax=itmax, ifmin=ifmin,
+                                 ifmax=ifmax, cmap=cmap, fig=fig_wrapper,
+                                 layout=layout)
 
     im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
                          axis=ax, cmap=cmap, image_interp='bilinear',
-                         contours=False, names=names)
+                         contours=False, names=names, show_names=show_names,
+                         show=False, onselect=selection_callback)
 
     if colorbar:
         divider = make_axes_locatable(ax)
         cax = divider.append_axes("right", size="5%", pad=0.05)
-        cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
+        cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap)
         cbar.set_ticks((vmin, vmax))
         cbar.ax.tick_params(labelsize=12)
         cbar.ax.set_title('AU')
@@ -827,61 +1001,66 @@ def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
     return fig
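
A hypothetical call illustrating the new arguments (``tfr`` is assumed to
be an AverageTFR instance computed elsewhere); with ``cmap=None`` the map
falls back to 'Reds' for all-positive data and 'RdBu_r' otherwise::

    from mne.viz import plot_tfr_topomap

    fig = plot_tfr_topomap(tfr, tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
                           ch_type='grad', baseline=(None, 0), mode='mean',
                           cmap=None, cbar_fmt='%1.1e', outlines='skirt')
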
 
 
-def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
-                        vmax=None, vmin=None, cmap='RdBu_r', sensors='k,',
+def plot_evoked_topomap(evoked, times="auto", ch_type=None, layout=None,
+                        vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
                         colorbar=True, scale=None, scale_time=1e3, unit=None,
-                        res=64, size=1, format='%3.1f',
+                        res=64, size=1, cbar_fmt='%3.1f',
                         time_format='%01d ms', proj=False, show=True,
                         show_names=False, title=None, mask=None,
                         mask_params=None, outlines='head', contours=6,
-                        image_interp='bilinear'):
+                        image_interp='bilinear', average=None, head_pos=None,
+                        axes=None):
     """Plot topographic maps of specific time points of evoked data
 
     Parameters
     ----------
     evoked : Evoked
         The Evoked object.
-    times : float | array of floats | None.
-        The time point(s) to plot. If None, 10 topographies will be shown
-        will a regular time spacing between the first and last time instant.
-    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
+    times : float | array of floats | "auto" | "peaks".
+        The time point(s) to plot. If "auto", the number of ``axes`` determines
+        the amount of time point(s). If ``axes`` is also None, 10 topographies
+        will be shown with a regular time spacing between the first and last
+        time instant. If "peaks", finds time points automatically by checking
+        for local maxima in global field power.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
         The channel type to plot. For 'grad', the gradiometers are collected in
         pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
     layout : None | Layout
         Layout instance specifying sensor positions (does not need to
         be specified for Neuromag data). If possible, the correct layout file
         is inferred from the data; if no appropriate layout file was found, the
         layout is automatically generated from the sensor locations.
-    vmin : float | callable
-        The value specfying the lower bound of the color range.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
         If None, and vmax is None, -vmax is used. Else np.min(data).
-        If callable, the output equals vmin(data).
-    vmax : float | callable
-        The value specfying the upper bound of the color range.
-        If None, the maximum absolute value is used. If vmin is None,
-        but vmax is not, defaults to np.min(data).
-        If callable, the output equals vmax(data).
+        If callable, the output equals vmin(data). Defaults to None.
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
     cmap : matplotlib colormap
         Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
         'Reds'.
     sensors : bool | str
         Add markers for sensor locations to the plot. Accepts matplotlib plot
-        format string (e.g., 'r+' for red plusses).
+        format string (e.g., 'r+' for red plusses). If True, a circle will be
+        used (via .add_artist). Defaults to True.
     colorbar : bool
         Plot a colorbar.
-    scale : float | None
+    scale : dict | float | None
         Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
         for grad and 1e15 for mag.
     scale_time : float | None
         Scale the time labels. Defaults to 1e3 (ms).
-    unit : str | None
+    unit : dict | str | None
         The unit of the channel type used for colorbar label. If
         scale is None the unit is automatically determined.
     res : int
         The resolution of the topomap image (n pixels along each side).
     size : float
         Side length per topomap in inches.
-    format : str
+    cbar_fmt : str
         String format for colorbar values.
     time_format : str
         String format for the time annotations. Defaults to "%01d ms".
@@ -890,7 +1069,7 @@ def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
         a check box for reversible selection of SSP projection vectors will
         be shown.
     show : bool
-        Call pyplot.show() at the end.
+        Show figure if True.
     show_names : bool | callable
         If True, show channel names on top of the map. If a callable is
         passed, channel names will be formatted using the callable; e.g., to
@@ -904,61 +1083,122 @@ def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
         Indices set to `True` will be considered. Defaults to None.
     mask_params : dict | None
         Additional plotting parameters for plotting significant sensors.
-        Default (None) equals:
-        dict(marker='o', markerfacecolor='w', markeredgecolor='k', linewidth=0,
-             markersize=4)
-    outlines : 'head' | dict | None
-        The outlines to be drawn. If 'head', a head scheme will be drawn. If
-        dict, each key refers to a tuple of x and y positions. The values in
-        'mask_pos' will serve as image mask. If None, nothing will be drawn.
+        Default (None) equals::
+
+            dict(marker='o', markerfacecolor='w', markeredgecolor='k',
+                 linewidth=0, markersize=4)
+
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions until none falls outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
         Defaults to 'head'.
     contours : int | False | None
         The number of contour lines to draw. If 0, no contours will be drawn.
     image_interp : str
         The image interpolation to be used. All matplotlib options are
         accepted.
+    average : float | None
+        The time window around a given time to be used for averaging (seconds).
+        For example, 0.01 would translate into window that starts 5 ms before
+        and ends 5 ms after a given time point. Defaults to None, which means
+        no averaging.
+    head_pos : dict | None
+        If None (default), the sensors are positioned such that they span
+        the head circle. If dict, can have entries 'center' (tuple) and
+        'scale' (tuple) for what the center and scale of the head should be
+        relative to the electrode locations.
+    axes : instance of Axes | list | None
+        The axes to plot to. If list, the list must be a list of Axes of the
+        same length as ``times`` (unless ``times`` is None). If instance of
+        Axes, ``times`` must be a float or a list of one float.
+        Defaults to None.
+
+    Returns
+    -------
+    fig : instance of matplotlib.figure.Figure
+       The figure.
     """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(evoked, ch_type)
     import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable  # noqa
 
-    if ch_type.startswith('planar'):
-        key = 'grad'
-    else:
-        key = ch_type
-
-    if scale is None:
-        scale = DEFAULTS['scalings'][key]
-        unit = DEFAULTS['units'][key]
+    mask_params = _handle_default('mask_params', mask_params)
+    mask_params['markersize'] *= size / 2.
+    mask_params['markeredgewidth'] *= size / 2.
 
-    if mask_params is None:
-        mask_params = DEFAULTS['mask_params'].copy()
-        mask_params['markersize'] *= size / 2.
-        mask_params['markeredgewidth'] *= size / 2.
+    if isinstance(axes, plt.Axes):
+        axes = [axes]
 
-    if times is None:
-        times = np.linspace(evoked.times[0], evoked.times[-1], 10)
+    if times == "peaks":
+        npeaks = 10 if axes is None else len(axes)
+        times = _find_peaks(evoked, npeaks)
+    elif times == "auto":
+        if axes is None:
+            times = np.linspace(evoked.times[0], evoked.times[-1], 10)
+        else:
+            times = np.linspace(evoked.times[0], evoked.times[-1], len(axes))
     elif np.isscalar(times):
         times = [times]
+
+    times = np.array(times)
+
+    if times.ndim != 1:
+        raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
     if len(times) > 20:
         raise RuntimeError('Too many plots requested. Please pass at most '
                            '20 time instants.')
+
+    n_times = len(times)
+    nax = n_times + bool(colorbar)
+    width = size * nax
+    height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
+    if axes is None:
+        plt.figure(figsize=(width, height))
+        axes = list()
+        for ax_idx in range(len(times)):
+            if colorbar:  # Make room for the colorbar
+                axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
+            else:
+                axes.append(plt.subplot(1, n_times, ax_idx + 1))
+    elif colorbar:
+        logger.warning('Colorbar is drawn to the rightmost column of the '
+                       'figure.\nBe sure to provide enough space for it '
+                       'or turn it off with colorbar=False.')
+    if len(axes) != n_times:
+        raise RuntimeError('Axes and times must be equal in size.')
     tmin, tmax = evoked.times[[0, -1]]
-    for t in times:
-        if not tmin <= t <= tmax:
-            raise ValueError('Times should be between %0.3f and %0.3f. (Got '
-                             '%0.3f).' % (tmin, tmax, t))
+    _time_comp = _time_mask(times=times, tmin=tmin, tmax=tmax)
+    if not np.all(_time_comp):
+        raise ValueError('Times should be between {0:0.3f} and {1:0.3f}. (Got '
+                         '{2}).'.format(tmin, tmax,
+                                        ['%0.3f' % t
+                                         for t in times[~_time_comp]]))
+
+    picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
+        evoked, ch_type, layout)
+
+    if ch_type.startswith('planar'):
+        key = 'grad'
+    else:
+        key = ch_type
+
+    scale = _handle_default('scalings', scale)[key]
+    unit = _handle_default('units', unit)[key]
 
-    picks, pos, merge_grads, names = _prepare_topo_plot(evoked, ch_type,
-                                                        layout)
     if not show_names:
         names = None
 
-    n = len(times)
-    nax = n + bool(colorbar)
-    width = size * nax
-    height = size * 1. + max(0, 0.1 * (4 - size))
-    fig = plt.figure(figsize=(width, height))
     w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
-    top_frame = max((0.05 if title is None else 0.15), .2 / size)
+    top_frame = max((0.05 if title is None else 0.25), .2 / size)
+    fig = axes[0].get_figure()
     fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
                         top=1 - top_frame)
     time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
@@ -967,10 +1207,28 @@ def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
         data = evoked.copy().apply_proj().data
     else:
         data = evoked.data
+    if average is None:
+        data = data[np.ix_(picks, time_idx)]
+    elif isinstance(average, float):
+        if not average > 0:
+            raise ValueError('The average parameter must be positive. You '
+                             'passed a non-positive value')
+        data_ = np.zeros((len(picks), len(time_idx)))
+        ave_time = float(average) / 2.
+        iter_times = evoked.times[time_idx]
+        for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
+                                                     iter_times - ave_time,
+                                                     iter_times + ave_time)):
+            my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
+            data_[:, ii] = data[picks][:, my_range].mean(-1)
+        data = data_
+    else:
+        raise ValueError('The average parameter must be None or a float. '
+                         'Check your input.')
 
-    data = data[np.ix_(picks, time_idx)] * scale
+    data *= scale
     if merge_grads:
-        from ..layouts.layout import _merge_grad_data
+        from ..channels.layout import _merge_grad_data
         data = _merge_grad_data(data)
 
     vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
@@ -981,41 +1239,46 @@ def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
         _picks = picks[::2 if ch_type not in ['mag', 'eeg'] else 1]
         mask_ = mask[np.ix_(_picks, time_idx)]
 
-    pos, outlines = _check_outlines(pos, outlines)
+    pos, outlines = _check_outlines(pos, outlines, head_pos)
     if outlines is not None:
         image_mask, pos = _make_image_mask(outlines, pos, res)
     else:
         image_mask = None
 
-    for i, t in enumerate(times):
-        ax = plt.subplot(1, nax, i + 1)
-        tp, cn = plot_topomap(data[:, i], pos, vmin=vmin, vmax=vmax,
+    for idx, time in enumerate(times):
+        tp, cn = plot_topomap(data[:, idx], pos, vmin=vmin, vmax=vmax,
                               sensors=sensors, res=res, names=names,
                               show_names=show_names, cmap=cmap,
-                              mask=mask_[:, i] if mask is not None else None,
-                              mask_params=mask_params, axis=ax,
+                              mask=mask_[:, idx] if mask is not None else None,
+                              mask_params=mask_params, axis=axes[idx],
                               outlines=outlines, image_mask=image_mask,
-                              contours=contours, image_interp=image_interp)
+                              contours=contours, image_interp=image_interp,
+                              show=False)
+
         images.append(tp)
         if cn is not None:
             contours_.append(cn)
         if time_format is not None:
-            plt.title(time_format % (t * scale_time))
+            axes[idx].set_title(time_format % (time * scale_time))
+
+    if title is not None:
+        plt.suptitle(title, verticalalignment='top', size='x-large')
+        tight_layout(pad=size, fig=fig)
 
     if colorbar:
-        cax = plt.subplot(1, n + 1, n + 1)
-        plt.colorbar(images[-1], ax=cax, cax=cax, ticks=[vmin, 0, vmax],
-                     format=format)
+        cax = plt.subplot(1, n_times + 1, n_times + 1)
         # resize the colorbar (by default the color fills the whole axes)
         cpos = cax.get_position()
         if size <= 1:
             cpos.x0 = 1 - (.7 + .1 / size) / nax
         cpos.x1 = cpos.x0 + .1 / nax
-        cpos.y0 = .1
+        cpos.y0 = .2
         cpos.y1 = .7
         cax.set_position(cpos)
         if unit is not None:
             cax.set_title(unit)
+        cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
+        cbar.set_ticks([vmin, 0, vmax])
 
     if proj == 'interactive':
         _check_delayed_ssp(evoked)
@@ -1026,10 +1289,334 @@ def plot_evoked_topomap(evoked, times=None, ch_type='mag', layout=None,
                       plot_update_proj_callback=_plot_update_evoked_topomap)
         _draw_proj_checkbox(None, params)
 
-    if title is not None:
-        plt.suptitle(title, verticalalignment='top', size='x-large')
-        tight_layout(pad=2 * size / 2.0, fig=fig)
     if show:
         plt.show()
 
     return fig
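
A hypothetical call combining the new ``times='peaks'`` and ``average``
options (``evoked`` is assumed to be an mne.Evoked instance); each map then
averages a 50 ms window centred on a global-field-power peak::

    from mne.viz import plot_evoked_topomap

    fig = plot_evoked_topomap(evoked, times='peaks', ch_type='mag',
                              average=0.05, outlines='head')
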
+
+
+def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None,
+                             vmin=None, vmax=None, cmap='RdBu_r',
+                             colorbar=False, cbar_fmt='%3.3f'):
+    """Aux Function"""
+    import matplotlib.pyplot as plt
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+    ax.set_yticks([])
+    ax.set_xticks([])
+    ax.set_frame_on(False)
+    vmin = np.min(data) if vmin is None else vmin
+    vmax = np.max(data) if vmax is None else vmax
+
+    if title is not None:
+        ax.set_title(title, fontsize=10)
+    im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axis=ax,
+                         cmap=cmap, image_interp='bilinear', contours=False,
+                         show=False)
+
+    if colorbar is True:
+        divider = make_axes_locatable(ax)
+        cax = divider.append_axes("right", size="10%", pad=0.25)
+        cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
+        cbar.set_ticks((vmin, vmax))
+        if unit is not None:
+            cbar.ax.set_title(unit, fontsize=8)
+        cbar.ax.tick_params(labelsize=8)
+
+
+@verbose
+def plot_epochs_psd_topomap(epochs, bands=None, vmin=None, vmax=None,
+                            tmin=None, tmax=None,
+                            proj=False, n_fft=256, ch_type=None,
+                            n_overlap=0, layout=None,
+                            cmap='RdBu_r', agg_fun=None, dB=False, n_jobs=1,
+                            normalize=False, cbar_fmt='%0.3f',
+                            outlines='head', show=True, verbose=None):
+    """Plot the topomap of the power spectral density across epochs
+
+    Parameters
+    ----------
+    epochs : instance of Epochs
+        The epochs object.
+    bands : list of tuple | None
+        The lower and upper frequency and the name for that band. If None
+        (default), expands to::
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None np.min(data) is used. If callable, the output equals
+        vmin(data).
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    tmin : float | None
+        Start time to consider.
+    tmax : float | None
+        End time to consider.
+    proj : bool
+        Apply projection.
+    n_fft : int
+        Number of points to use in Welch FFT calculations.
+    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+        The channel type to plot. For 'grad', the gradiometers are collected in
+        pairs and the RMS for each pair is plotted.
+        If None, then channels are chosen in the order given above.
+    n_overlap : int
+        The number of points of overlap between blocks.
+    layout : None | Layout
+        Layout instance specifying sensor positions (does not need to
+        be specified for Neuromag data). If possible, the correct layout
+        file is inferred from the data; if no appropriate layout file was
+        found, the layout is automatically generated from the sensor
+        locations.
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    agg_fun : callable
+        The function used to aggregate over frequencies.
+        Defaults to np.sum if normalize is True, else np.mean.
+    dB : bool
+        If True, transform data to decibels (with ``10 * np.log10(data)``)
+        following the application of `agg_fun`. Only valid if normalize is
+        False.
+    n_jobs : int
+        Number of jobs to run in parallel.
+    normalize : bool
+        If True, each band will be divided by the total power. Defaults to
+        False.
+    cbar_fmt : str
+        The colorbar format. Defaults to '%0.3f'.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions until none falls outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    show : bool
+        Show figure if True.
+    verbose : bool, str, int, or None
+        If not None, override default verbose level (see mne.verbose).
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure showing one scalp topography per frequency band.
+    """
+    from ..channels import _get_ch_type
+    ch_type = _get_ch_type(epochs, ch_type)
+
+    picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
+        epochs, ch_type, layout)
+
+    psds, freqs = compute_epochs_psd(epochs, picks=picks, n_fft=n_fft,
+                                     tmin=tmin, tmax=tmax,
+                                     n_overlap=n_overlap, proj=proj,
+                                     n_jobs=n_jobs)
+    psds = np.mean(psds, axis=0)
+
+    if merge_grads:
+        from ..channels.layout import _merge_grad_data
+        psds = _merge_grad_data(psds)
+
+    return plot_psds_topomap(
+        psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun, vmin=vmin,
+        vmax=vmax, bands=bands, cmap=cmap, dB=dB, normalize=normalize,
+        cbar_fmt=cbar_fmt, outlines=outlines, show=show)
+
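
A hypothetical call restricted to two bands (``epochs`` is assumed to be an
mne.Epochs instance); with ``normalize=True`` each topography shows the
fraction of total power falling inside the band::

    from mne.viz import plot_epochs_psd_topomap

    bands = [(8, 12, 'Alpha'), (12, 30, 'Beta')]
    fig = plot_epochs_psd_topomap(epochs, bands=bands, ch_type='eeg',
                                  normalize=True, cbar_fmt='%0.2f')
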
+
+def plot_psds_topomap(
+        psds, freqs, pos, agg_fun=None, vmin=None, vmax=None, bands=None,
+        cmap='RdBu_r', dB=True, normalize=False, cbar_fmt='%0.3f',
+        outlines='head', show=True):
+    """Plot spatial maps of PSDs
+
+    Parameters
+    ----------
+    psds : np.ndarray of float, shape (n_channels, n_freqs)
+        Power spectral densities.
+    freqs : np.ndarray of float, shape (n_freqs)
+        Frequencies used to compute psds.
+    pos : numpy.ndarray of float, shape (n_sensors, 2)
+        The positions of the sensors.
+    agg_fun : callable
+        The function used to aggregate over frequencies.
+        Defaults to np.sum if normalize is True, else np.mean.
+    vmin : float | callable | None
+        The value specifying the lower bound of the color range.
+        If None np.min(data) is used. If callable, the output equals
+        vmin(data).
+    vmax : float | callable | None
+        The value specifying the upper bound of the color range.
+        If None, the maximum absolute value is used. If callable, the output
+        equals vmax(data). Defaults to None.
+    bands : list of tuple | None
+        The lower and upper frequency and the name for that band. If None
+        (default), expands to::
+
+            bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                     (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    cmap : matplotlib colormap
+        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+        'Reds'.
+    dB : bool
+        If True, transform data to decibels (with ``10 * np.log10(data)``)
+        following the application of `agg_fun`. Only valid if normalize is
+        False.
+    normalize : bool
+        If True, each band will be divided by the total power. Defaults to
+        False.
+    cbar_fmt : str
+        The colorbar format. Defaults to '%0.3f'.
+    outlines : 'head' | 'skirt' | dict | None
+        The outlines to be drawn. If 'head', the default head scheme will be
+        drawn. If 'skirt' the head scheme will be drawn, but sensors are
+        allowed to be plotted outside of the head circle. If dict, each key
+        refers to a tuple of x and y positions, the values in 'mask_pos' will
+        serve as image mask, and the 'autoshrink' (bool) field will trigger
+        automated shrinking of the positions until none falls outside the outline.
+        Alternatively, a matplotlib patch object can be passed for advanced
+        masking options, either directly or as a function that returns patches
+        (required for multi-axis plots). If None, nothing will be drawn.
+        Defaults to 'head'.
+    show : bool
+        Show figure if True.
+
+    Returns
+    -------
+    fig : instance of matplotlib figure
+        Figure showing one scalp topography per frequency band.
+    """
+
+    import matplotlib.pyplot as plt
+
+    if bands is None:
+        bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
+                 (12, 30, 'Beta'), (30, 45, 'Gamma')]
+
+    if agg_fun is None:
+        agg_fun = np.sum if normalize is True else np.mean
+
+    if normalize is True:
+        psds /= psds.sum(axis=-1)[..., None]
+        assert np.allclose(psds.sum(axis=-1), 1.)
+
+    n_axes = len(bands)
+    fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
+    if n_axes == 1:
+        axes = [axes]
+
+    for ax, (fmin, fmax, title) in zip(axes, bands):
+        freq_mask = (fmin < freqs) & (freqs < fmax)
+        if freq_mask.sum() == 0:
+            raise RuntimeError('No frequencies in band "%s" (%s, %s)'
+                               % (title, fmin, fmax))
+        data = agg_fun(psds[:, freq_mask], axis=1)
+        if dB is True and normalize is False:
+            data = 10 * np.log10(data)
+            unit = 'dB'
+        else:
+            unit = 'power'
+
+        _plot_topomap_multi_cbar(data, pos, ax, title=title,
+                                 vmin=vmin, vmax=vmax, cmap=cmap,
+                                 colorbar=True, unit=unit, cbar_fmt=cbar_fmt)
+    tight_layout(fig=fig)
+    fig.canvas.draw()
+    if show:
+        plt.show()
+    return fig
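
The aggregation rule above in brief: with ``normalize=True`` each channel's
PSD is rescaled to sum to one and bands are reduced with ``np.sum``, so the
plotted value is the fraction of total power inside the band. A standalone
sketch with toy data::

    import numpy as np

    rng = np.random.RandomState(0)
    psds = rng.rand(5, 100)                   # (n_channels, n_freqs)
    freqs = np.linspace(1., 50., 100)
    psds /= psds.sum(axis=-1)[..., None]      # the normalize=True branch
    band = (8. < freqs) & (freqs < 12.)
    print(np.sum(psds[:, band], axis=1))      # per-channel alpha fraction
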
+
+
+def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
+              cmap, fig, layout=None):
+    """Callback called from topomap for drawing average tfr over channels."""
+    import matplotlib.pyplot as plt
+    pos, _ = _check_outlines(pos, outlines='head', head_pos=None)
+    ax = eclick.inaxes
+    xmin = min(eclick.xdata, erelease.xdata)
+    xmax = max(eclick.xdata, erelease.xdata)
+    ymin = min(eclick.ydata, erelease.ydata)
+    ymax = max(eclick.ydata, erelease.ydata)
+    indices = [i for i in range(len(pos))
+               if xmin < pos[i][0] < xmax and ymin < pos[i][1] < ymax]
+    for idx, circle in enumerate(ax.artists):
+        if idx in indices:
+            circle.set_color('r')
+        else:
+            circle.set_color('black')
+    plt.gcf().canvas.draw()
+    if not indices:
+        return
+    data = tfr.data
+    if ch_type == 'mag':
+        picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
+        data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[picks[x]] for x in indices]
+    elif ch_type == 'grad':
+        picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
+        from ..channels.layout import _pair_grad_sensors
+        grads = _pair_grad_sensors(tfr.info, layout=layout,
+                                   topomap_coords=False)
+        idxs = list()
+        for idx in indices:
+            idxs.append(grads[idx * 2])
+            idxs.append(grads[idx * 2 + 1])  # pair of grads
+        data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[x] for x in idxs]
+    elif ch_type == 'eeg':
+        picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
+        data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
+        chs = [tfr.ch_names[picks[x]] for x in indices]
+    logger.info('Averaging TFR over channels ' + str(chs))
+    if len(fig) == 0:
+        fig.append(figure_nobar())
+    if not plt.fignum_exists(fig[0].number):
+        fig[0] = figure_nobar()
+    ax = fig[0].add_subplot(111)
+    itmax = min(itmax, len(tfr.times) - 1)
+    ifmax = min(ifmax, len(tfr.freqs) - 1)
+    extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
+              tfr.freqs[ifmax])
+
+    title = 'Average over %d %s channels.' % (len(chs), ch_type)
+    ax.set_title(title)
+    ax.set_xlabel('Time (ms)')
+    ax.set_ylabel('Frequency (Hz)')
+    img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
+                    cmap=cmap)
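+    # on the first draw, colorbar() creates the second axes and the handle
+    # is stored on it; later selections only retarget the existing colorbar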
+    if len(fig[0].get_axes()) < 2:
+        fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
+    else:
+        fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
+    fig[0].canvas.draw()
+    plt.figure(fig[0].number)
+    plt.show()
+
+
+def _find_peaks(evoked, npeaks):
+    """Helper function for finding peaks from evoked data.
+
+    Returns ``npeaks`` biggest peaks as a list of time points.
+    """
+    argrelmax = _get_argrelmax()
+    gfp = evoked.data.std(axis=0)
+    order = max(len(evoked.times) // 30, 1)
+    peaks = argrelmax(gfp, order=order, axis=0)[0]
+    if len(peaks) > npeaks:
+        max_indices = np.argsort(gfp[peaks])[-npeaks:]
+        peaks = np.sort(peaks[max_indices])
+    times = evoked.times[peaks]
+    if len(times) == 0:
+        times = [evoked.times[gfp.argmax()]]
+    return times
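
``_get_argrelmax`` is an internal compatibility wrapper; the same peak
picking can be sketched directly with ``scipy.signal.argrelmax`` on a toy
global field power trace::

    import numpy as np
    from scipy.signal import argrelmax

    times = np.linspace(0., 0.5, 300)
    gfp = np.abs(np.sin(2 * np.pi * 10. * times))  # toy GFP, 10 Hz
    peaks = argrelmax(gfp, order=max(1, len(times) // 30))[0]
    biggest = np.sort(peaks[np.argsort(gfp[peaks])[-3:]])  # npeaks = 3
    print(times[biggest])
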
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index 88c5e5b..89796a3 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -11,75 +11,40 @@ from __future__ import print_function
 # License: Simplified BSD
 
 import math
-from copy import deepcopy
 from functools import partial
 import difflib
 import webbrowser
 from warnings import warn
 import tempfile
-
 import numpy as np
 
 from ..io import show_fiff
-from ..utils import verbose
+from ..utils import verbose, set_config
 
 
 COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
           '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
 
-DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
-                           emg='k', ref_meg='steelblue', misc='k', stim='k',
-                           resp='k', chpi='k', exci='k', ias='k', syst='k'),
-                units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
-                scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
-                scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
-                                       eog=150e-6, ecg=5e-4, emg=1e-3,
-                                       ref_meg=1e-12, misc=1e-3,
-                                       stim=1, resp=1, chpi=1e-4, exci=1,
-                                       ias=1, syst=1),
-                ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
-                          eeg=(-200., 200.), misc=(-5., 5.)),
-                titles=dict(eeg='EEG', grad='Gradiometers',
-                            mag='Magnetometers', misc='misc'),
-                mask_params=dict(marker='o',
-                                 markerfacecolor='w',
-                                 markeredgecolor='k',
-                                 linewidth=0,
-                                 markeredgewidth=1,
-                                 markersize=4))
-
-
-def _mutable_defaults(*mappings):
-    """ To avoid dicts as default keyword arguments
-
-    Use this function instead to resolve default dict values.
-    Example usage:
-    scalings, units = _mutable_defaults(('scalings', scalings,
-                                         'units', units))
-    """
-    out = []
-    for k, v in mappings:
-        this_mapping = DEFAULTS[k]
-        if v is not None:
-            this_mapping = deepcopy(DEFAULTS[k])
-            this_mapping.update(v)
-        out += [this_mapping]
-    return out
-
-
-def _setup_vmin_vmax(data, vmin, vmax):
-    """Aux function to handle vmin and vamx parameters"""
+
+def _setup_vmin_vmax(data, vmin, vmax, norm=False):
+    """Aux function to handle vmin and vmax parameters"""
     if vmax is None and vmin is None:
         vmax = np.abs(data).max()
-        vmin = -vmax
+        if norm:
+            vmin = 0.
+        else:
+            vmin = -vmax
     else:
         if callable(vmin):
             vmin = vmin(data)
         elif vmin is None:
-            vmin = np.min(data)
+            if norm:
+                vmin = 0.
+            else:
+                vmin = np.min(data)
         if callable(vmax):
             vmax = vmax(data)
-        elif vmin is None:
+        elif vmax is None:
             vmax = np.max(data)
     return vmin, vmax
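
Behaviour sketch for the ``norm`` flag added above (assuming
``_setup_vmin_vmax`` is importable from ``mne.viz.utils``): with both
bounds unset the range is symmetric about zero, unless ``norm=True``, in
which case it starts at zero::

    import numpy as np
    from mne.viz.utils import _setup_vmin_vmax

    data = np.array([0.2, 0.7, 1.5])
    print(_setup_vmin_vmax(data, None, None))             # (-1.5, 1.5)
    print(_setup_vmin_vmax(data, None, None, norm=True))  # (0.0, 1.5)
    print(_setup_vmin_vmax(data, np.min, lambda d: 2 * d.max()))  # (0.2, 3.0)
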
 
@@ -94,33 +59,36 @@ def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
     pad : float
          Padding between the figure edge and the edges of subplots, as a
         fraction of the font-size.
-    h_pad, w_pad : float
-        padding (height/width) between edges of adjacent subplots.
+    h_pad : float
+        Padding height between edges of adjacent subplots.
+        Defaults to `pad_inches`.
+    w_pad : float
+        Padding width between edges of adjacent subplots.
         Defaults to `pad_inches`.
+    fig : instance of Figure
+        Figure to apply changes to.
     """
     import matplotlib.pyplot as plt
-    if fig is None:
-        fig = plt.gcf()
+    fig = plt.gcf() if fig is None else fig
 
+    fig.canvas.draw()
     try:  # see https://github.com/matplotlib/matplotlib/issues/2654
-        fig.canvas.draw()
         fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
-    except:
-        msg = ('Matplotlib function \'tight_layout\'%s.'
-               ' Skipping subpplot adjusment.')
-        if not hasattr(plt, 'tight_layout'):
-            case = ' is not available'
-        else:
-            case = (' is not supported by your backend: `%s`'
-                    % plt.get_backend())
-        warn(msg % case)
+    except Exception:
+        warn('Matplotlib function \'tight_layout\' is not supported.'
+             ' Skipping subplot adjustment.')
+    else:
+        try:
+            fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
+        except Exception:
+            pass
 
 
 def _check_delayed_ssp(container):
     """ Aux function to be used for interactive SSP selection
     """
     if container.proj is True or\
-       all([p['active'] for p in container.info['projs']]):
+       all(p['active'] for p in container.info['projs']):
         raise RuntimeError('Projs are already applied. Please initialize'
                            ' the data with proj set to False.')
     elif len(container.info['projs']) < 1:
@@ -132,8 +100,9 @@ def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
 
     Parameters
     ----------
-    limits : list (or array) of length 3
-        Bounds for the colormap.
+    limits : list (or array) of length 3 or 6
+        Bounds for the colormap, which will be mirrored across zero if length
+        3, or completely specified (and potentially asymmetric) if length 6.
     format : str
         Type of colormap to return. If 'matplotlib', will return a
         matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
@@ -158,43 +127,57 @@ def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
         brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
 
     """
-    l = np.asarray(limits, dtype='float')
-    if len(l) != 3:
-        raise ValueError('limits must have 3 elements')
-    if any(l < 0):
-        raise ValueError('limits must all be positive')
-    if any(np.diff(l) <= 0):
+    # Ensure limits is an array
+    limits = np.asarray(limits, dtype='float')
+
+    if len(limits) != 3 and len(limits) != 6:
+        raise ValueError('limits must have 3 or 6 elements')
+    if len(limits) == 3 and any(limits < 0.):
+        raise ValueError('if 3 elements, limits must all be non-negative')
+    if any(np.diff(limits) <= 0):
         raise ValueError('limits must be monotonically increasing')
     if format == 'matplotlib':
         from matplotlib import colors
-        l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
-        cdict = {'red': ((l[0], 0.0, 0.0),
-                         (l[1], 0.0, 0.0),
-                         (l[2], 0.5, 0.5),
-                         (l[3], 0.5, 0.5),
-                         (l[4], 1.0, 1.0),
-                         (l[5], 1.0, 1.0)),
-                 'green': ((l[0], 1.0, 1.0),
-                           (l[1], 0.0, 0.0),
-                           (l[2], 0.5, 0.5),
-                           (l[3], 0.5, 0.5),
-                           (l[4], 0.0, 0.0),
-                           (l[5], 1.0, 1.0)),
-                 'blue': ((l[0], 1.0, 1.0),
-                          (l[1], 1.0, 1.0),
-                          (l[2], 0.5, 0.5),
-                          (l[3], 0.5, 0.5),
-                          (l[4], 0.0, 0.0),
-                          (l[5], 0.0, 0.0))}
+        if len(limits) == 3:
+            limits = (np.concatenate((-np.flipud(limits), limits)) +
+                      limits[-1]) / (2 * limits[-1])
+        else:
+            limits = (limits - np.min(limits)) / np.max(limits -
+                                                        np.min(limits))
+
+        cdict = {'red': ((limits[0], 0.0, 0.0),
+                         (limits[1], 0.0, 0.0),
+                         (limits[2], 0.5, 0.5),
+                         (limits[3], 0.5, 0.5),
+                         (limits[4], 1.0, 1.0),
+                         (limits[5], 1.0, 1.0)),
+                 'green': ((limits[0], 1.0, 1.0),
+                           (limits[1], 0.0, 0.0),
+                           (limits[2], 0.5, 0.5),
+                           (limits[3], 0.5, 0.5),
+                           (limits[4], 0.0, 0.0),
+                           (limits[5], 1.0, 1.0)),
+                 'blue': ((limits[0], 1.0, 1.0),
+                          (limits[1], 1.0, 1.0),
+                          (limits[2], 0.5, 0.5),
+                          (limits[3], 0.5, 0.5),
+                          (limits[4], 0.0, 0.0),
+                          (limits[5], 0.0, 0.0))}
         return colors.LinearSegmentedColormap('mne_analyze', cdict)
     elif format == 'mayavi':
-        l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
+        if len(limits) == 3:
+            limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
+                limits[-1]
+        else:
+            limits = np.concatenate((limits[:3], [0], limits[3:]))
+            limits /= np.max(np.abs(limits))
         r = np.array([0, 0, 0, 0, 1, 1, 1])
         g = np.array([1, 0, 0, 0, 0, 0, 1])
         b = np.array([1, 1, 1, 0, 0, 0, 0])
         a = np.array([1, 1, 0, 0, 0, 1, 1])
         xp = (np.arange(256) - 128) / 128.0
-        colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
+        colormap = np.r_[[np.interp(xp, limits, 255 * c)
+                          for c in [r, g, b, a]]].T
         return colormap
     else:
         raise ValueError('format must be either matplotlib or mayavi')
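
Usage sketch for the extended ``limits`` handling: a length-3 list is
mirrored across zero, while a length-6 list specifies the (possibly
asymmetric) bounds in full::

    from mne.viz import mne_analyze_colormap

    cmap_sym = mne_analyze_colormap([5, 10, 15], format='matplotlib')
    cmap_asym = mne_analyze_colormap([-20., -10., -5., 5., 10., 15.],
                                     format='matplotlib')
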
@@ -204,13 +187,13 @@ def _toggle_options(event, params):
     """Toggle options (projectors) dialog"""
     import matplotlib.pyplot as plt
     if len(params['projs']) > 0:
-        if params['fig_opts'] is None:
+        if params['fig_proj'] is None:
             _draw_proj_checkbox(event, params, draw_current_state=False)
         else:
             # turn off options dialog
-            plt.close(params['fig_opts'])
+            plt.close(params['fig_proj'])
             del params['proj_checks']
-            params['fig_opts'] = None
+            params['fig_proj'] = None
 
 
 def _toggle_proj(event, params):
@@ -226,7 +209,7 @@ def _toggle_proj(event, params):
         bools = [True] * len(params['projs'])
 
     compute_proj = False
-    if not 'proj_bools' in params:
+    if 'proj_bools' not in params:
         compute_proj = True
     elif not np.array_equal(bools, params['proj_bools']):
         compute_proj = True
@@ -236,6 +219,95 @@ def _toggle_proj(event, params):
         params['plot_update_proj_callback'](params, bools)
 
 
+def _get_help_text(params):
+    """Aux function for customizing help dialogs text."""
+    text, text2 = list(), list()
+
+    text.append(u'\u2190 : \n')
+    text.append(u'\u2192 : \n')
+    text.append(u'\u2193 : \n')
+    text.append(u'\u2191 : \n')
+    text.append(u'- : \n')
+    text.append(u'+ or = : \n')
+    text.append(u'Home : \n')
+    text.append(u'End : \n')
+    text.append(u'Page down : \n')
+    text.append(u'Page up : \n')
+
+    text.append(u'F11 : \n')
+    text.append(u'? : \n')
+    text.append(u'Esc : \n\n')
+    text.append(u'Mouse controls\n')
+    text.append(u'click on data :\n')
+
+    text2.append('Navigate left\n')
+    text2.append('Navigate right\n')
+
+    text2.append('Scale down\n')
+    text2.append('Scale up\n')
+
+    text2.append('Toggle full screen mode\n')
+    text2.append('Open help box\n')
+    text2.append('Quit\n\n\n')
+    if 'raw' in params:
+        text2.insert(4, 'Reduce the time shown per view\n')
+        text2.insert(5, 'Increase the time shown per view\n')
+        text.append(u'click elsewhere in the plot :\n')
+        if 'ica' in params:
+            text.append(u'click component name :\n')
+            text2.insert(2, 'Navigate components down\n')
+            text2.insert(3, 'Navigate components up\n')
+            text2.insert(8, 'Reduce the number of components per view\n')
+            text2.insert(9, 'Increase the number of components per view\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Show topography for the component\n')
+        else:
+            text.append(u'click channel name :\n')
+            text2.insert(2, 'Navigate channels down\n')
+            text2.insert(3, 'Navigate channels up\n')
+            text2.insert(8, 'Reduce the number of channels per view\n')
+            text2.insert(9, 'Increase the number of channels per view\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Mark bad channel\n')
+
+    elif 'epochs' in params:
+        text.append(u'right click :\n')
+        text2.insert(4, 'Reduce the number of epochs per view\n')
+        text2.insert(5, 'Increase the number of epochs per view\n')
+        if 'ica' in params:
+            text.append(u'click component name :\n')
+            text2.insert(2, 'Navigate components down\n')
+            text2.insert(3, 'Navigate components up\n')
+            text2.insert(8, 'Reduce the number of components per view\n')
+            text2.insert(9, 'Increase the number of components per view\n')
+            text2.append('Mark component for exclusion\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Show topography for the component\n')
+        else:
+            text.append(u'click channel name :\n')
+            text.append(u'right click channel name :\n')
+            text2.insert(2, 'Navigate channels down\n')
+            text2.insert(3, 'Navigate channels up\n')
+            text2.insert(8, 'Reduce the number of channels per view\n')
+            text2.insert(9, 'Increase the number of channels per view\n')
+            text.insert(10, u'b : \n')
+            text2.insert(10, 'Toggle butterfly plot on/off\n')
+            text.insert(11, u'h : \n')
+            text2.insert(11, 'Show histogram of peak-to-peak values\n')
+            text2.append('Mark bad epoch\n')
+            text2.append('Vertical line at a time instant\n')
+            text2.append('Mark bad channel\n')
+            text2.append('Plot ERP/ERF image\n')
+            text.append(u'middle click :\n')
+            text2.append('Show channel name (butterfly plot)\n')
+        text.insert(11, u'o : \n')
+        text2.insert(11, 'View settings (orig. view only)\n')
+
+    return ''.join(text), ''.join(text2)
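
As a rough illustration (with a hypothetical minimal params dict), the two
returned strings are rendered side by side, so their lines pair up
key-to-description:

    text, text2 = _get_help_text({'raw': None})
    # first key line pairs with first description line
    print(text.splitlines()[0], '->', text2.splitlines()[0])
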
+
+
 def _prepare_trellis(n_cells, max_col):
     """Aux function
     """
@@ -256,8 +328,7 @@ def _prepare_trellis(n_cells, max_col):
 
 def _draw_proj_checkbox(event, params, draw_current_state=True):
     """Toggle options (projectors) dialog"""
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
+    from matplotlib import widgets
     projs = params['projs']
     # turn on options dialog
 
@@ -269,13 +340,10 @@ def _draw_proj_checkbox(event, params, draw_current_state=True):
     height = len(projs) / 6.0 + 0.5
     fig_proj = figure_nobar(figsize=(width, height))
     fig_proj.canvas.set_window_title('SSP projection vectors')
-    ax_temp = plt.axes((0, 0, 1, 1))
-    ax_temp.get_yaxis().set_visible(False)
-    ax_temp.get_xaxis().set_visible(False)
-    fig_proj.add_axes(ax_temp)
+    params['fig_proj'] = fig_proj  # necessary for proper toggling
+    ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
 
-    proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
-                                           actives=actives)
+    proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
     # change already-applied projectors to red
     for ii, p in enumerate(projs):
         if p['active'] is True:
@@ -295,6 +363,60 @@ def _draw_proj_checkbox(event, params, draw_current_state=True):
         pass
 
 
+def _layout_figure(params):
+    """Function for setting figure layout. Shared with raw and epoch plots"""
+    size = params['fig'].get_size_inches() * params['fig'].dpi
+    scroll_width = 25
+    hscroll_dist = 25
+    vscroll_dist = 10
+    l_border = 100
+    r_border = 10
+    t_border = 35
+    b_border = 40
+
+    # only bother trying to reset layout if it's reasonable to do so
+    if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
+        return
+
+    # convert to relative units
+    scroll_width_x = scroll_width / size[0]
+    scroll_width_y = scroll_width / size[1]
+    vscroll_dist /= size[0]
+    hscroll_dist /= size[1]
+    l_border /= size[0]
+    r_border /= size[0]
+    t_border /= size[1]
+    b_border /= size[1]
+    # main axis (traces)
+    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
+    ax_y = hscroll_dist + scroll_width_y + b_border
+    ax_height = 1.0 - ax_y - t_border
+
+    pos = [l_border, ax_y, ax_width, ax_height]
+
+    params['ax'].set_position(pos)
+    if 'ax2' in params:
+        params['ax2'].set_position(pos)
+    # vscroll (channels)
+    pos = [ax_width + l_border + vscroll_dist, ax_y,
+           scroll_width_x, ax_height]
+    params['ax_vscroll'].set_position(pos)
+    # hscroll (time)
+    pos = [l_border, b_border, ax_width, scroll_width_y]
+    params['ax_hscroll'].set_position(pos)
+    if 'ax_button' in params:
+        # options button
+        pos = [l_border + ax_width + vscroll_dist, b_border,
+               scroll_width_x, scroll_width_y]
+        params['ax_button'].set_position(pos)
+    if 'ax_help_button' in params:
+        pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
+               scroll_width_x * 2, scroll_width_y]
+        params['ax_help_button'].set_position(pos)
+    params['fig'].canvas.draw()
+
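
The pixel-to-fraction conversion above is the core of the layout logic; a
minimal standalone sketch, assuming a hypothetical 800x600-pixel figure:

    fig_size = (800.0, 600.0)              # figure size in pixels
    scroll_width_rel = 25 / fig_size[0]    # 25 px as a fraction of the width
    l_border_rel = 100 / fig_size[0]       # 100 px left border
    r_border_rel = 10 / fig_size[0]
    vscroll_dist_rel = 10 / fig_size[0]
    # the main axes get whatever width remains
    ax_width = (1.0 - scroll_width_rel - l_border_rel -
                r_border_rel - vscroll_dist_rel)
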
+
 @verbose
 def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
                  read_limit=np.inf, max_str=30, verbose=None):
@@ -336,7 +458,7 @@ def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
     if fname_out is not None:
         f = open(fname_out, 'w')
     else:
-        f = tempfile.NamedTemporaryFile('w', delete=False)
+        f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
         fname_out = f.name
     with f as fid:
         fid.write(diff)
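
For reference, a typical call (hypothetical file names); with fname_out=None
the diff now lands in a .html temporary file, which the suffix change above
lets browsers render:

    from mne.viz import compare_fiff
    # writes the tag-by-tag diff and returns the output file name
    fname_html = compare_fiff('raw_a.fif', 'raw_b.fif', show=False)
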
@@ -347,11 +469,10 @@ def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
 
 def figure_nobar(*args, **kwargs):
     """Make matplotlib figure with no toolbar"""
-    import matplotlib.pyplot as plt
-    import matplotlib as mpl
-    old_val = mpl.rcParams['toolbar']
+    from matplotlib import rcParams, pyplot as plt
+    old_val = rcParams['toolbar']
     try:
-        mpl.rcParams['toolbar'] = 'none'
+        rcParams['toolbar'] = 'none'
         fig = plt.figure(*args, **kwargs)
         # remove button press catchers (for toolbar)
         cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
@@ -360,5 +481,364 @@ def figure_nobar(*args, **kwargs):
     except Exception as ex:
         raise ex
     finally:
-        mpl.rcParams['toolbar'] = old_val
+        rcParams['toolbar'] = old_val
     return fig
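
A quick usage sketch; any plt.figure() arguments pass straight through:

    fig = figure_nobar(figsize=(2, 3))  # a figure with no toolbar
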
+
+
+def _helper_raw_resize(event, params):
+    """Helper for resizing"""
+    size = ','.join([str(s) for s in params['fig'].get_size_inches()])
+    set_config('MNE_BROWSE_RAW_SIZE', size)
+    _layout_figure(params)
+
+
+def _plot_raw_onscroll(event, params, len_channels=None):
+    """Interpret scroll events"""
+    if len_channels is None:
+        len_channels = len(params['info']['ch_names'])
+    orig_start = params['ch_start']
+    if event.step < 0:
+        params['ch_start'] = min(params['ch_start'] + params['n_channels'],
+                                 len_channels - params['n_channels'])
+    else:  # scrolled up
+        params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
+    if orig_start != params['ch_start']:
+        _channels_changed(params, len_channels)
+
+
+def _channels_changed(params, len_channels):
+    """Helper function for dealing with the vertical shift of the viewport."""
+    if params['ch_start'] + params['n_channels'] > len_channels:
+        params['ch_start'] = len_channels - params['n_channels']
+    if params['ch_start'] < 0:
+        params['ch_start'] = 0
+    params['plot_fun']()
+
+
+def _plot_raw_time(value, params):
+    """Deal with changed time value"""
+    info = params['info']
+    max_times = params['n_times'] / float(info['sfreq']) - params['duration']
+    if value > max_times:
+        value = params['n_times'] / info['sfreq'] - params['duration']
+    if value < 0:
+        value = 0
+    if params['t_start'] != value:
+        params['t_start'] = value
+        params['hsel_patch'].set_x(value)
+
+
+def _plot_raw_onkey(event, params):
+    """Interpret key presses"""
+    import matplotlib.pyplot as plt
+    if event.key == 'escape':
+        plt.close(params['fig'])
+    elif event.key == 'down':
+        params['ch_start'] += params['n_channels']
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'up':
+        params['ch_start'] -= params['n_channels']
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'right':
+        value = params['t_start'] + params['duration']
+        _plot_raw_time(value, params)
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == 'left':
+        value = params['t_start'] - params['duration']
+        _plot_raw_time(value, params)
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key in ['+', '=']:
+        params['scale_factor'] *= 1.1
+        params['plot_fun']()
+    elif event.key == '-':
+        params['scale_factor'] /= 1.1
+        params['plot_fun']()
+    elif event.key == 'pageup':
+        n_channels = params['n_channels'] + 1
+        offset = params['ax'].get_ylim()[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].set_yticks(params['offsets'])
+        params['vsel_patch'].set_height(n_channels)
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'pagedown':
+        n_channels = params['n_channels'] - 1
+        if n_channels == 0:
+            return
+        offset = params['ax'].get_ylim()[0] / n_channels
+        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
+        params['n_channels'] = n_channels
+        params['ax'].set_yticks(params['offsets'])
+        params['vsel_patch'].set_height(n_channels)
+        if len(params['lines']) > n_channels:  # remove line from view
+            params['lines'][n_channels].set_xdata([])
+            params['lines'][n_channels].set_ydata([])
+        _channels_changed(params, len(params['info']['ch_names']))
+    elif event.key == 'home':
+        duration = params['duration'] - 1.0
+        if duration <= 0:
+            return
+        params['duration'] = duration
+        params['hsel_patch'].set_width(params['duration'])
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == 'end':
+        duration = params['duration'] + 1.0
+        if duration > params['raw'].times[-1]:
+            duration = params['raw'].times[-1]
+        params['duration'] = duration
+        params['hsel_patch'].set_width(params['duration'])
+        params['update_fun']()
+        params['plot_fun']()
+    elif event.key == '?':
+        _onclick_help(event, params)
+    elif event.key == 'f11':
+        mng = plt.get_current_fig_manager()
+        mng.full_screen_toggle()
+
+
+def _mouse_click(event, params):
+    """Vertical select callback"""
+    if event.button != 1:
+        return
+    if event.inaxes is None:
+        if params['n_channels'] > 100:
+            return
+        ax = params['ax']
+        ylim = ax.get_ylim()
+        pos = ax.transData.inverted().transform((event.x, event.y))
+        if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
+            return
+        params['label_click_fun'](pos)
+    # vertical scrollbar changed
+    if event.inaxes == params['ax_vscroll']:
+        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
+        if params['ch_start'] != ch_start:
+            params['ch_start'] = ch_start
+            params['plot_fun']()
+    # horizontal scrollbar changed
+    elif event.inaxes == params['ax_hscroll']:
+        _plot_raw_time(event.xdata - params['duration'] / 2, params)
+        params['update_fun']()
+        params['plot_fun']()
+
+    elif event.inaxes == params['ax']:
+        params['pick_bads_fun'](event)
+
+
+def _select_bads(event, params, bads):
+    """Helper for selecting bad channels onpick. Returns updated bads list."""
+    # Trade-off: a tolerance band of mean +/- 2 * std around each trace avoids
+    # selecting more than one channel when drifts are present; for clean data,
+    # click on flat segments rather than on peaks.
+    def f(x, y):
+        return y(np.mean(x), x.std() * 2)
+    lines = event.inaxes.lines
+    for line in lines:
+        ydata = line.get_ydata()
+        if not isinstance(ydata, list) and not np.isnan(ydata).any():
+            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
+            if ymin <= event.ydata <= ymax:
+                this_chan = vars(line)['ch_name']
+                if this_chan in params['info']['ch_names']:
+                    ch_idx = params['ch_start'] + lines.index(line)
+                    if this_chan not in bads:
+                        bads.append(this_chan)
+                        color = params['bad_color']
+                        line.set_zorder(-1)
+                    else:
+                        while this_chan in bads:
+                            bads.remove(this_chan)
+                        color = vars(line)['def_color']
+                        line.set_zorder(0)
+                    line.set_color(color)
+                    params['ax_vscroll'].patches[ch_idx].set_color(color)
+                    break
+    else:
+        x = np.array([event.xdata] * 2)
+        params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
+        params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
+        params['vertline_t'].set_text('%0.3f' % x[0])
+    return bads
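
The hit test above boils down to a band check around each trace; a standalone
sketch with synthetic data:

    import numpy as np
    ydata = np.random.RandomState(0).randn(1000)  # one channel's trace
    ymin = np.mean(ydata) - 2 * ydata.std()
    ymax = np.mean(ydata) + 2 * ydata.std()
    clicked_y = 0.5
    hit = ymin <= clicked_y <= ymax  # True -> toggle this channel as bad
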
+
+
+def _onclick_help(event, params):
+    """Function for drawing help window"""
+    import matplotlib.pyplot as plt
+    text, text2 = _get_help_text(params)
+
+    width = 6
+    height = 5
+
+    fig_help = figure_nobar(figsize=(width, height), dpi=80)
+    fig_help.canvas.set_window_title('Help')
+    ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
+    ax.set_title('Keyboard shortcuts')
+    plt.axis('off')
+    ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
+    ax1.set_yticklabels(list())
+    plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
+             ha='right')
+    plt.axis('off')
+
+    ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
+    ax2.set_yticklabels(list())
+    plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
+    plt.axis('off')
+
+    tight_layout(fig=fig_help)
+    # this should work for non-test cases
+    try:
+        fig_help.canvas.draw()
+        fig_help.show()
+    except Exception:
+        pass
+
+
+class ClickableImage(object):
+
+    """
+    Display an image so you can click on it and store x/y positions.
+
+    Takes as input an image array (this can be any array that works with
+    imshow, but it will work best with images). Displays the image and lets
+    you click on it. Stores the x/y coordinates of each click, so that you
+    can later superimpose something on top of the image.
+
+    Upon clicking, the x/y coordinate of the cursor will be stored in
+    self.coords, which is a list of (x, y) tuples.
+
+    Parameters
+    ----------
+    imdata : ndarray
+        The image that you wish to click on for 2-d points.
+    **kwargs : dict
+        Keyword arguments. Passed to ax.imshow.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    """
+
+    def __init__(self, imdata, **kwargs):
+        """Display the image for clicking."""
+        from matplotlib.pyplot import figure, show
+        self.coords = []
+        self.imdata = imdata
+        self.fig = figure()
+        self.ax = self.fig.add_subplot(111)
+        self.ymax = self.imdata.shape[0]
+        self.xmax = self.imdata.shape[1]
+        self.im = self.ax.imshow(imdata, aspect='auto',
+                                 extent=(0, self.xmax, 0, self.ymax),
+                                 picker=True, **kwargs)
+        self.ax.axis('off')
+        self.fig.canvas.mpl_connect('pick_event', self.onclick)
+        show()
+
+    def onclick(self, event):
+        """Mouse click handler.
+
+        Parameters
+        ----------
+        event : matplotlib event object
+            The matplotlib object that we use to get x/y position.
+        """
+        mouseevent = event.mouseevent
+        self.coords.append((mouseevent.xdata, mouseevent.ydata))
+
+    def plot_clicks(self, **kwargs):
+        """Plot the x/y positions stored in self.coords.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Arguments are passed to imshow in displaying the bg image.
+        """
+        from matplotlib.pyplot import subplots, show
+        f, ax = subplots()
+        ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
+        xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
+        xcoords, ycoords = zip(*self.coords)
+        ax.scatter(xcoords, ycoords, c='r')
+        ann_text = np.arange(len(self.coords)).astype(str)
+        for txt, coord in zip(ann_text, self.coords):
+            ax.annotate(txt, coord, fontsize=20, color='r')
+        ax.set_xlim(xlim)
+        ax.set_ylim(ylim)
+        show()
+
+    def to_layout(self, **kwargs):
+        """Turn coordinates into an MNE Layout object.
+
+        Normalizes the coordinates by the image used to generate the clicks.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Arguments are passed to generate_2d_layout
+        """
+        from mne.channels.layout import generate_2d_layout
+        coords = np.array(self.coords)
+        lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
+        return lt
+
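
A minimal interactive session might look like this (the random array is just
a stand-in for a real image, and this assumes the usual mne.viz export):

    import numpy as np
    from mne.viz import ClickableImage
    im = np.random.RandomState(0).rand(64, 64)
    click = ClickableImage(im)  # click a few points, then close the window
    # afterwards:
    # click.plot_clicks()       # review the clicked positions
    # lt = click.to_layout()    # convert the clicks to an MNE Layout
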
+
+def _fake_click(fig, ax, point, xform='ax', button=1):
+    """Helper to fake a click at a relative point within axes."""
+    if xform == 'ax':
+        x, y = ax.transAxes.transform_point(point)
+    elif xform == 'data':
+        x, y = ax.transData.transform_point(point)
+    else:
+        raise ValueError('unknown transform')
+    try:
+        fig.canvas.button_press_event(x, y, button, False, None)
+    except Exception:  # for old MPL
+        fig.canvas.button_press_event(x, y, button, False)
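
In tests this is used along the lines of:

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    _fake_click(fig, ax, (0.5, 0.5))                # center, axes coordinates
    _fake_click(fig, ax, (0.0, 0.0), xform='data')  # data coordinates
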
+
+
+def add_background_image(fig, im, set_ratios=None):
+    """Add a background image to a plot.
+
+    Adds the image specified in `im` to the figure `fig`. This is
+    generally meant to be done with topo plots, though it could work
+    for any plot.
+
+    Note: This modifies the figure and/or axes in place.
+
+    Parameters
+    ----------
+    fig : plt.figure
+        The figure you wish to add a background image to.
+    im : ndarray
+        A numpy array that works with a call to plt.imshow(im). This
+        will be plotted as the background of the figure.
+    set_ratios : None | str
+        Set the aspect ratio of any axes in fig to the value in
+        set_ratios. Defaults to None, which does nothing to the axes.
+
+    Returns
+    -------
+    ax_im : instance of matplotlib Axes
+        The axes created for the image you added.
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    """
+    if set_ratios is not None:
+        for ax in fig.axes:
+            ax.set_aspect(set_ratios)
+
+    ax_im = fig.add_axes([0, 0, 1, 1])
+    ax_im.imshow(im, aspect='auto')
+    ax_im.set_zorder(-1)
+    return ax_im
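
A short usage sketch (random noise standing in for a real background image,
assuming the usual mne.viz export):

    import numpy as np
    import matplotlib.pyplot as plt
    from mne.viz import add_background_image
    fig, ax = plt.subplots()
    ax.plot([0, 1], [1, 0])
    bg = np.random.RandomState(0).rand(100, 100)
    ax_im = add_background_image(fig, bg, set_ratios='auto')
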
diff --git a/setup.cfg b/setup.cfg
index 0d2de22..1ae30e4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,7 +15,7 @@ release = egg_info -RDb ''
 doc-files = doc
 
 [nosetests]
-with-coverage = 1
+# with-coverage = 1
 # cover-html = 1
 # cover-html-dir = coverage
 cover-package = mne
@@ -26,3 +26,7 @@ doctest-tests = 1
 doctest-extension = rst
 doctest-fixtures = _fixture
 #doctest-options = +ELLIPSIS,+NORMALIZE_WHITESPACE
+
+[flake8]
+exclude = __init__.py,*externals*,constants.py
+ignore = E241
diff --git a/setup.py b/setup.py
index b8c143d..4428bbc 100755
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,13 @@
 #! /usr/bin/env python
 #
 
-# Copyright (C) 2011-2014 Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+# Copyright (C) 2011-2014 Alexandre Gramfort
+# <alexandre.gramfort at telecom-paristech.fr>
 
 import os
 from os import path as op
 
-import setuptools  # noqa; analysis:ignore; we are using a setuptools namespace
+import setuptools  # noqa; we are using a setuptools namespace
 from numpy.distutils.core import setup
 
 # get the version (don't import mne here, so dependencies are not needed)
@@ -60,16 +61,22 @@ if __name__ == "__main__":
           platforms='any',
           packages=['mne', 'mne.tests',
                     'mne.beamformer', 'mne.beamformer.tests',
+                    'mne.commands', 'mne.commands.tests',
                     'mne.connectivity', 'mne.connectivity.tests',
                     'mne.data',
                     'mne.datasets',
-                    'mne.datasets.sample',
-                    'mne.datasets.megsim',
-                    'mne.datasets.spm_face',
                     'mne.datasets.eegbci',
+                    'mne.datasets._fake',
+                    'mne.datasets.megsim',
+                    'mne.datasets.sample',
                     'mne.datasets.somato',
+                    'mne.datasets.spm_face',
+                    'mne.datasets.brainstorm',
+                    'mne.datasets.testing',
+                    'mne.datasets.tests',
                     'mne.externals',
-                    'mne.fiff',
+                    'mne.externals.h5io',
+                    'mne.externals.tempita',
                     'mne.io', 'mne.io.tests',
                     'mne.io.array', 'mne.io.array.tests',
                     'mne.io.brainvision', 'mne.io.brainvision.tests',
@@ -81,9 +88,7 @@ if __name__ == "__main__":
                     'mne.forward', 'mne.forward.tests',
                     'mne.viz', 'mne.viz.tests',
                     'mne.gui', 'mne.gui.tests',
-                    'mne.layouts', 'mne.layouts.tests',
                     'mne.minimum_norm', 'mne.minimum_norm.tests',
-                    'mne.mixed_norm',
                     'mne.inverse_sparse', 'mne.inverse_sparse.tests',
                     'mne.preprocessing', 'mne.preprocessing.tests',
                     'mne.simulation', 'mne.simulation.tests',
@@ -92,14 +97,19 @@ if __name__ == "__main__":
                     'mne.time_frequency', 'mne.time_frequency.tests',
                     'mne.realtime', 'mne.realtime.tests',
                     'mne.decoding', 'mne.decoding.tests',
-                    'mne.commands', 'mne.externals',
-                    'mne.externals.tempita'],
+                    'mne.commands',
+                    'mne.channels', 'mne.channels.tests'],
           package_data={'mne': [op.join('data', '*.sel'),
                                 op.join('data', 'icos.fif.gz'),
-                                op.join('data', 'coil_def.dat'),
+                                op.join('data', 'coil_def*.dat'),
                                 op.join('data', 'helmets', '*.fif.gz'),
-                                op.join('layouts', '*.lout'),
-                                op.join('layouts', '*.lay'),
+                                op.join('data', 'FreeSurferColorLUT.txt'),
+                                op.join('channels', 'data', 'layouts', '*.lout'),
+                                op.join('channels', 'data', 'layouts', '*.lay'),
+                                op.join('channels', 'data', 'montages', '*.sfp'),
+                                op.join('channels', 'data', 'montages', '*.txt'),
+                                op.join('channels', 'data', 'montages', '*.elc'),
+                                op.join('channels', 'data', 'neighbors', '*.mat'),
                                 op.join('html', '*.js'),
                                 op.join('html', '*.css')]},
           scripts=['bin/mne'])
diff --git a/tutorials/README.txt b/tutorials/README.txt
new file mode 100644
index 0000000..d7c59ba
--- /dev/null
+++ b/tutorials/README.txt
@@ -0,0 +1,4 @@
+Tutorials
+=========
+
+Introductory tutorials to MNE.
diff --git a/examples/stats/plot_cluster_1samp_test_time_frequency.py b/tutorials/plot_cluster_1samp_test_time_frequency.py
similarity index 92%
rename from examples/stats/plot_cluster_1samp_test_time_frequency.py
rename to tutorials/plot_cluster_1samp_test_time_frequency.py
index e779ec2..638657d 100644
--- a/examples/stats/plot_cluster_1samp_test_time_frequency.py
+++ b/tutorials/plot_cluster_1samp_test_time_frequency.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_sensor_1samp_tfr:
+
 ===============================================================
 Non-parametric 1 sample cluster statistic on single trial power
 ===============================================================
@@ -20,9 +22,8 @@ The procedure consists in:
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
@@ -30,6 +31,8 @@ from mne.time_frequency import single_trial_power
 from mne.stats import permutation_cluster_1samp_test
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -74,9 +77,9 @@ evoked_data = np.mean(data, 0)
 # spectrotemporal resolution.
 decim = 5
 frequencies = np.arange(8, 40, 2)  # define frequencies of interest
-Fs = raw.info['sfreq']  # sampling in Hz
-epochs_power = single_trial_power(data, Fs=Fs, frequencies=frequencies,
-                                  n_cycles=4, use_fft=False, n_jobs=1,
+sfreq = raw.info['sfreq']  # sampling in Hz
+epochs_power = single_trial_power(data, sfreq=sfreq, frequencies=frequencies,
+                                  n_cycles=4, n_jobs=1,
                                   baseline=(-100, 0), times=times,
                                   baseline_mode='ratio', decim=decim)
 
@@ -98,12 +101,11 @@ epochs_power = np.log10(epochs_power)  # take log of ratio
 # Compute statistic
 threshold = 2.5
 T_obs, clusters, cluster_p_values, H0 = \
-                   permutation_cluster_1samp_test(epochs_power,
-                               n_permutations=100, threshold=threshold, tail=0)
+    permutation_cluster_1samp_test(epochs_power, n_permutations=100,
+                                   threshold=threshold, tail=0)
 
 ###############################################################################
 # View time-frequency plots
-import matplotlib.pyplot as plt
 plt.clf()
 plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
 plt.subplot(2, 1, 1)
diff --git a/examples/stats/plot_cluster_methods_tutorial.py b/tutorials/plot_cluster_methods_tutorial.py
similarity index 97%
rename from examples/stats/plot_cluster_methods_tutorial.py
rename to tutorials/plot_cluster_methods_tutorial.py
index 32876d8..3054ad9 100644
--- a/examples/stats/plot_cluster_methods_tutorial.py
+++ b/tutorials/plot_cluster_methods_tutorial.py
@@ -1,4 +1,7 @@
+# doc:slow-example
 """
+.. _tut_stats_cluster_methods:
+
 ======================================================
 Permutation t-test on toy data with spatial clustering
 ======================================================
@@ -45,16 +48,18 @@ this is also FWER corrected. Finally, combining the TFCE and "hat"
 methods tightens the area declared significant (again FWER corrected),
 and allows for evaluation of each point independently instead of as
 a single, broad cluster.
-"""
 
+Note that this example does quite a bit of processing, so even on a
+fast machine it can take a few minutes to complete.
+"""
 # Authors: Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
 from scipy import stats
 from functools import partial
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D  # noqa; this changes hidden mpl vars
 
 from mne.stats import (spatio_temporal_cluster_1samp_test,
                        bonferroni_correction, ttest_1samp_no_p)
@@ -64,6 +69,8 @@ try:
 except ImportError:
     from scikits.learn.feature_extraction.image import grid_to_graph
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 width = 40
@@ -158,8 +165,6 @@ ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
 ###############################################################################
 # Visualize results
 
-import matplotlib.pyplot as plt
-from mpl_toolkits.mplot3d import Axes3D  # this changes hidden matplotlib vars
 plt.ion()
 fig = plt.figure(facecolor='w')
 
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal.py b/tutorials/plot_cluster_stats_spatio_temporal.py
similarity index 94%
rename from examples/stats/plot_cluster_stats_spatio_temporal.py
rename to tutorials/plot_cluster_stats_spatio_temporal.py
index 621a7d8..96172bd 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal.py
+++ b/tutorials/plot_cluster_stats_spatio_temporal.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_source_1samp:
+
 =================================================================
 Permutation t-test on source data with spatio-temporal clustering
 =================================================================
@@ -9,12 +11,10 @@ The multiple comparisons problem is addressed with a cluster-level
 permutation test across space and time.
 
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print(__doc__)
 
 import os.path as op
 import numpy as np
@@ -29,7 +29,8 @@ from mne.stats import (spatio_temporal_cluster_1samp_test,
                        summarize_clusters_stc)
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 from mne.datasets import sample
-from mne.viz import mne_analyze_colormap
+
+print(__doc__)
 
 ###############################################################################
 # Set parameters
@@ -161,19 +162,15 @@ print('Visualizing clusters.')
 #    Now let's build a convenient representation of each cluster, where each
 #    cluster becomes a "time point" in the SourceEstimate
 stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
-                                             vertno=fsave_vertices,
+                                             vertices=fsave_vertices,
                                              subject='fsaverage')
 
 #    Let's actually plot the first "time point" in the SourceEstimate, which
 #    shows all the clusters, weighted by duration
-colormap = mne_analyze_colormap(limits=[0, 10, 50])
 subjects_dir = op.join(data_path, 'subjects')
 # blue blobs are for condition A < condition B, red for A > B
-brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both', colormap,
-                                 subjects_dir=subjects_dir,
+brain = stc_all_cluster_vis.plot(hemi='both', subjects_dir=subjects_dir,
                                  time_label='Duration significant (ms)')
 brain.set_data_time_index(0)
-# The colormap requires brain data to be scaled -fmax -> fmax
-brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
 brain.show_view('lateral')
 brain.save_image('clusters.png')
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py b/tutorials/plot_cluster_stats_spatio_temporal_2samp.py
similarity index 93%
rename from examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
rename to tutorials/plot_cluster_stats_spatio_temporal_2samp.py
index 3cd5765..a1bf4ee 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
+++ b/tutorials/plot_cluster_stats_spatio_temporal_2samp.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_source_2samp:
+
 =========================================================================
 2 samples permutation test on source data with spatio-temporal clustering
 =========================================================================
@@ -13,8 +15,6 @@ permutation test across space and time.
 #          Eric Larson <larson.eric.d at gmail.com>
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import os.path as op
 import numpy as np
 from scipy import stats as stats
@@ -24,6 +24,8 @@ from mne import spatial_tris_connectivity, grade_to_tris
 from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -90,18 +92,16 @@ print('Visualizing clusters.')
 #    cluster becomes a "time point" in the SourceEstimate
 fsave_vertices = [np.arange(10242), np.arange(10242)]
 stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
-                                             vertno=fsave_vertices,
+                                             vertices=fsave_vertices,
                                              subject='fsaverage')
 
 #    Let's actually plot the first "time point" in the SourceEstimate, which
 #    shows all the clusters, weighted by duration
 subjects_dir = op.join(data_path, 'subjects')
 # blue blobs are for condition A != condition B
-brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
+brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
                                  subjects_dir=subjects_dir,
-                                 time_label='Duration significant (ms)',
-                                 fmin=0, fmid=25, fmax=50)
+                                 time_label='Duration significant (ms)')
 brain.set_data_time_index(0)
-brain.scale_data_colormap(fmin=0, fmid=25, fmax=50, transparent=True)
 brain.show_view('lateral')
 brain.save_image('clusters.png')
diff --git a/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py b/tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
similarity index 89%
rename from examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
rename to tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
index 00b4b20..1a5ecf1 100644
--- a/examples/stats/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
+++ b/tutorials/plot_cluster_stats_spatio_temporal_repeated_measures_anova.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_source_rANOVA:
+
 ======================================================================
 Repeated measures ANOVA on source data with spatio-temporal clustering
 ======================================================================
@@ -13,28 +15,28 @@ interaction effect using a repeated measures ANOVA. The multiple
 comparisons problem is addressed with a cluster-level permutation test
 across space and time.
 """
-
 # Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #          Eric Larson <larson.eric.d at gmail.com>
 #          Denis Engemann <denis.engemann at gmail.com>
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import os.path as op
 import numpy as np
 from numpy.random import randn
+import matplotlib.pyplot as plt
 
 import mne
 from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
                  grade_to_tris)
-from mne.stats import (spatio_temporal_cluster_test, f_threshold_twoway_rm,
-                       f_twoway_rm, summarize_clusters_stc)
+from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
+                       f_mway_rm, summarize_clusters_stc)
 
 from mne.minimum_norm import apply_inverse, read_inverse_operator
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -75,7 +77,7 @@ inverse_operator = read_inverse_operator(fname_inv)
 
 # we'll only use one hemisphere to speed up this example
 # instead of a second vertex array we'll pass an empty array
-sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([])]
+sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
 
 #    Let's average and compute inverse, then resample to speed things up
 conditions = []
@@ -115,7 +117,7 @@ for ii, condition in enumerate(conditions):
 #    each subject's data separately (and you might want to use morph_data
 #    instead), but here since all estimates are on 'sample' we can use one
 #    morph matrix for all the heavy lifting.
-fsave_vertices = [np.arange(10242), np.array([])]  # right hemisphere is empty
+fsave_vertices = [np.arange(10242), np.array([], int)]  # right hemi is empty
 morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
                                  fsave_vertices, 20, subjects_dir)
 n_vertices_fsave = morph_mat.shape[0]
@@ -166,15 +168,11 @@ def stat_fun(*args):
     # flattened array, necessitated by the clustering procedure.
     # The ANOVA however expects an input array of dimensions:
     # subjects X conditions X observations (optional).
-    # The following expression catches the list input, swaps the first and the
-    # second dimension and puts the remaining observations in the third
-    # dimension.
-    data = np.squeeze(np.swapaxes(np.array(args), 1, 0))
-    data = data.reshape(n_subjects, n_conditions,  # generalized if buffer used
-                        data.size / (n_subjects * n_conditions))
-    return f_twoway_rm(data, factor_levels=factor_levels, effects=effects,
-                       return_pvals=return_pvals)[0]
-                       #  drop p-values (empty array).
+    # The following expression catches the list input
+    # and swaps the first and the second dimension, and finally calls ANOVA.
+    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
+                     effects=effects, return_pvals=return_pvals)[0]
+    # get f-values only.
     # Note. for further details on this ANOVA function consider the
     # corresponding time frequency example.
 
@@ -193,10 +191,10 @@ connectivity = spatial_tris_connectivity(lh_source_space)
 #    Now let's actually do the clustering. Please relax, on a small
 #    notebook and one single thread only this will take a couple of minutes ...
 pthresh = 0.0005
-f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)
+f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
 
 #    To speed things up a bit we will ...
-n_permutations = 100  # ... run fewer permutations (reduces sensitivity)
+n_permutations = 128  # ... run fewer permutations (reduces sensitivity)
 
 print('Clustering.')
 T_obs, clusters, cluster_p_values, H0 = clu = \
@@ -216,7 +214,7 @@ print('Visualizing clusters.')
 #    Now let's build a convenient representation of each cluster, where each
 #    cluster becomes a "time point" in the SourceEstimate
 stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
-                                             vertno=fsave_vertices,
+                                             vertices=fsave_vertices,
                                              subject='fsaverage')
 
 #    Let's actually plot the first "time point" in the SourceEstimate, which
@@ -226,12 +224,10 @@ subjects_dir = op.join(data_path, 'subjects')
 # The brighter the color, the stronger the interaction between
 # stimulus modality and stimulus location
 
-brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'lh',
-                                 subjects_dir=subjects_dir,
+brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, colormap='mne',
                                  time_label='Duration significant (ms)')
 
 brain.set_data_time_index(0)
-brain.scale_data_colormap(fmin=5, fmid=10, fmax=30, transparent=True)
 brain.show_view('lateral')
 brain.save_image('cluster-lh.png')
 brain.show_view('medial')
@@ -240,13 +236,12 @@ brain.show_view('medial')
 # Finally, let's investigate interaction effect by reconstructing the time
 # courses
 
-import matplotlib.pyplot as plt
 inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
                   enumerate(good_cluster_inds)][0]  # first cluster
 
 times = np.arange(X[0].shape[1]) * tstep * 1e3
 
-plt.clf()
+plt.figure()
 colors = ['y', 'b', 'g', 'purple']
 event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
 
@@ -262,12 +257,12 @@ for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
     plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
                      alpha=0.5, label='')
 
-ymin, ymax = mean_tc.min() -5, mean_tc.max() + 5 
+ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
 plt.xlabel('Time (ms)')
 plt.ylabel('Activation (F-values)')
 plt.xlim(times[[0, -1]])
 plt.ylim(ymin, ymax)
-plt.fill_betweenx(np.arange(ymin, ymax), times[inds_t[0]],
+plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
                   times[inds_t[-1]], color='orange', alpha=0.3)
 plt.legend()
 plt.title('Interaction between stimulus-modality and location.')
diff --git a/examples/stats/plot_cluster_stats_time_frequency.py b/tutorials/plot_cluster_stats_time_frequency.py
similarity index 86%
rename from examples/stats/plot_cluster_stats_time_frequency.py
rename to tutorials/plot_cluster_stats_time_frequency.py
index 1deb208..bb11b87 100644
--- a/examples/stats/plot_cluster_stats_time_frequency.py
+++ b/tutorials/plot_cluster_stats_time_frequency.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_sensor_2samp_tfr:
+
 =========================================================================
 Non-parametric between conditions cluster statistic on single trial power
 =========================================================================
@@ -21,9 +23,8 @@ The procedure consists in:
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
@@ -31,6 +32,8 @@ from mne.time_frequency import single_trial_power
 from mne.stats import permutation_cluster_test
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -84,17 +87,16 @@ times = 1e3 * epochs_condition_1.times  # change unit to ms
 # spectrotemporal resolution.
 decim = 2
 frequencies = np.arange(7, 30, 3)  # define frequencies of interest
-Fs = raw.info['sfreq']  # sampling in Hz
+sfreq = raw.info['sfreq']  # sampling in Hz
 n_cycles = 1.5
-epochs_power_1 = single_trial_power(data_condition_1, Fs=Fs,
+
+epochs_power_1 = single_trial_power(data_condition_1, sfreq=sfreq,
                                     frequencies=frequencies,
-                                    n_cycles=n_cycles, use_fft=False,
-                                    decim=decim)
+                                    n_cycles=n_cycles, decim=decim)
 
-epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs,
+epochs_power_2 = single_trial_power(data_condition_2, sfreq=sfreq,
                                     frequencies=frequencies,
-                                    n_cycles=n_cycles, use_fft=False,
-                                    decim=decim)
+                                    n_cycles=n_cycles, decim=decim)
 
 epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
 epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix
@@ -111,12 +113,11 @@ epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
 # Compute statistic
 threshold = 6.0
 T_obs, clusters, cluster_p_values, H0 = \
-                   permutation_cluster_test([epochs_power_1, epochs_power_2],
-                               n_permutations=100, threshold=threshold, tail=0)
+    permutation_cluster_test([epochs_power_1, epochs_power_2],
+                             n_permutations=100, threshold=threshold, tail=0)
 
 ###############################################################################
 # View time-frequency plots
-import matplotlib.pyplot as plt
 plt.clf()
 plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
 plt.subplot(2, 1, 1)
@@ -136,12 +137,12 @@ for c, p_val in zip(clusters, cluster_p_values):
     if p_val <= 0.05:
         T_obs_plot[c] = T_obs[c]
 
-plt.imshow(T_obs, cmap=plt.cm.gray,
+plt.imshow(T_obs,
            extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-           aspect='auto', origin='lower')
-plt.imshow(T_obs_plot, cmap=plt.cm.jet,
+           aspect='auto', origin='lower', cmap='RdBu_r')
+plt.imshow(T_obs_plot,
            extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
-           aspect='auto', origin='lower')
+           aspect='auto', origin='lower', cmap='RdBu_r')
 
 plt.xlabel('time (ms)')
 plt.ylabel('Frequency (Hz)')
diff --git a/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py b/tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py
similarity index 86%
rename from examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
rename to tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py
index ba2da24..7ee302f 100644
--- a/examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py
+++ b/tutorials/plot_cluster_stats_time_frequency_repeated_measures_anova.py
@@ -1,4 +1,6 @@
 """
+.. _tut_stats_cluster_sensor_rANOVA_tfr:
+
 ====================================================================
 Mass-univariate twoway repeated measures ANOVA on single trial power
 ====================================================================
@@ -24,16 +26,17 @@ multiple comparisons using False Discovery Rate correction.
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import numpy as np
+import matplotlib.pyplot as plt
 
 import mne
 from mne import io
 from mne.time_frequency import single_trial_power
-from mne.stats import f_threshold_twoway_rm, f_twoway_rm, fdr_correction
+from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
 from mne.datasets import sample
 
+print(__doc__)
+
 ###############################################################################
 # Set parameters
 data_path = sample.data_path()
@@ -74,15 +77,15 @@ times = 1e3 * epochs.times  # change unit to ms
 # single_trial_power.
 decim = 2
 frequencies = np.arange(7, 30, 3)  # define frequencies of interest
-Fs = raw.info['sfreq']  # sampling in Hz
+sfreq = raw.info['sfreq']  # sampling in Hz
 n_cycles = frequencies / frequencies[0]
 baseline_mask = times[::decim] < 0
 
 # now create TFR representations for all conditions
 epochs_power = []
 for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
-    this_power = single_trial_power(condition, Fs=Fs, frequencies=frequencies,
-                                    n_cycles=n_cycles, use_fft=False,
+    this_power = single_trial_power(condition, sfreq=sfreq,
+                                    frequencies=frequencies, n_cycles=n_cycles,
                                     decim=decim)
     this_power = this_power[:, 0, :, :]  # we only have one channel.
     # Compute ratio with baseline power (be sure to correct time vector with
@@ -127,10 +130,9 @@ print(data.shape)
 #
 # Now we're ready to run our repeated measures ANOVA.
 
-fvals, pvals = f_twoway_rm(data, factor_levels, effects=effects)
+fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
 
 effect_labels = ['modality', 'location', 'modality by location']
-import matplotlib.pyplot as plt
 
 # let's visualize our effects by computing f-images
 for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
@@ -141,7 +143,7 @@ for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
                origin='lower')
     # create mask for significant Time-frequency locations
     effect = np.ma.masked_array(effect, [sig > .05])
-    plt.imshow(effect.reshape(8, 211), cmap=plt.cm.jet, extent=[times[0],
+    plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
                times[-1], frequencies[0], frequencies[-1]], aspect='auto',
                origin='lower')
     plt.colorbar()
@@ -171,20 +173,16 @@ def stat_fun(*args):
     # flattened array, necessitated by the clustering procedure.
     # The ANOVA however expects an input array of dimensions:
     # subjects X conditions X observations (optional).
-    # The following expression catches the list input, swaps the first and the
-    # second dimension and puts the remaining observations in the third
-    # dimension.
-    data = np.swapaxes(np.asarray(args), 1, 0).reshape(n_replications,
-                                                       n_conditions,
-                                                       n_times * n_frequencies)
-    return f_twoway_rm(data, factor_levels=factor_levels,
-                       effects=effects, return_pvals=False)[0]
+    # The following expression catches the list input and swaps the first and
+    # the second dimension and finally calls the ANOVA function.
+    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
+                     effects=effects, return_pvals=False)[0]
     # The ANOVA returns a tuple of f-values and p-values; we pick the former.
 
 
 pthresh = 0.00001  # set threshold rather high to save some time
-f_thresh = f_threshold_twoway_rm(n_replications, factor_levels, effects,
-                                 pthresh)
+f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
+                               pthresh)
 tail = 1  # f-test, so tail > 0
 n_permutations = 256  # Save some time (the test won't be too sensitive ...)
 T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
@@ -193,10 +191,11 @@ T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
 
 # Create new stats image with only significant clusters
 good_clusters = np.where(cluster_p_values < .05)[0]
-T_obs_plot = np.ma.masked_array(T_obs, np.invert(clusters[good_clusters]))
+T_obs_plot = np.ma.masked_array(
+    T_obs, np.invert(clusters[np.squeeze(good_clusters)]))
 
 plt.figure()
-for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, plt.cm.jet]):
+for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
     plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
                frequencies[0], frequencies[-1]], aspect='auto',
                origin='lower')
@@ -211,7 +210,7 @@ mask, _ = fdr_correction(pvals[2])
 T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
 
 plt.figure()
-for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, plt.cm.jet]):
+for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
     plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
                frequencies[0], frequencies[-1]], aspect='auto',
                origin='lower')
diff --git a/tutorials/plot_creating_data_structures.py b/tutorials/plot_creating_data_structures.py
new file mode 100644
index 0000000..ce4b32e
--- /dev/null
+++ b/tutorials/plot_creating_data_structures.py
@@ -0,0 +1,180 @@
+"""
+.. _tut_creating_data_structures:
+
+Creating MNE-Python's data structures from scratch
+==================================================
+"""
+
+from __future__ import print_function
+
+import mne
+import numpy as np
+
+
+###############################################################################
+# ------------------------------------------------------
+# Creating :class:`Info <mne.io.meas_info.Info>` objects
+# ------------------------------------------------------
+#
+# .. note:: for full documentation on the `Info` object, see
+#           :ref:`tut_info_objects`.
+#
+# Normally, :class:`mne.io.meas_info.Info` objects are created by the various
+# :ref:`data import functions <ch_raw>`.
+# However, if you wish to create one from scratch, you can use the
+# :func:`mne.create_info` function to initialize the minimally required
+# fields. Further fields can be assigned later as one would with a regular
+# dictionary.
+#
+# The following creates the absolute minimum info structure:
+
+# Create some dummy metadata
+n_channels = 32
+sampling_rate = 200
+info = mne.create_info(n_channels, sampling_rate)
+print(info)
+
+###############################################################################
+# You can also supply more extensive metadata:
+
+# Names for each channel
+channel_names = ['MEG1', 'MEG2', 'Cz', 'Pz', 'EOG']
+
+# The type (mag, grad, eeg, eog, misc, ...) of each channel
+channel_types = ['grad', 'grad', 'eeg', 'eeg', 'eog']
+
+# The sampling rate of the recording
+sfreq = 1000  # in Hertz
+
+# The EEG channels use the standard naming strategy.
+# By supplying the 'montage' parameter, approximate locations
+# will be added for them
+montage = 'standard_1005'
+
+# Initialize required fields
+info = mne.create_info(channel_names, sfreq, channel_types, montage)
+
+# Add some more information
+info['description'] = 'My custom dataset'
+info['bads'] = ['Pz']  # Names of bad channels
+
+print(info)
+
+###############################################################################
+# .. note:: When assigning new values to the fields of an
+#           :class:`mne.io.meas_info.Info` object, it is important that the
+#           fields are consistent:
+#
+#           - The length of the channel information field `chs` must be
+#             `nchan`.
+#           - The length of the `ch_names` field must be `nchan`.
+#           - The `ch_names` field should be consistent with the `name` field
+#             of the channel information contained in `chs`.
+#
+# ---------------------------------------------
+# Creating :class:`Raw <mne.io.RawFIF>` objects
+# ---------------------------------------------
+#
+# To create a :class:`mne.io.Raw` object from scratch, you can use the
+# :class:`mne.RawArray` class, which implements raw data that is backed by a
+# numpy array.  Its constructor simply takes the data matrix and
+# :class:`mne.io.meas_info.Info` object:
+
+# Generate some random data
+data = np.random.randn(5, 1000)
+
+# Initialize an info structure
+info = mne.create_info(
+    ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'],
+    ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'],
+    sfreq=100
+)
+
+custom_raw = mne.io.RawArray(data, info)
+print(custom_raw)
+
+###############################################################################
+# ---------------------------------------------
+# Creating :class:`Epochs <mne.Epochs>` objects
+# ---------------------------------------------
+#
+# To create an :class:`mne.Epochs` object from scratch, you can use the
+# :class:`mne.EpochsArray` class, which uses a numpy array directly without
+# wrapping a raw object. The array must be of shape `(n_epochs, n_chans,
+# n_times)`.
+
+# Generate some random data: 10 epochs, 5 channels, 2 seconds per epoch
+sfreq = 100
+data = np.random.randn(10, 5, sfreq*2)
+
+# Initialize an info structure
+info = mne.create_info(
+    ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'],
+    ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'],
+    sfreq=sfreq
+)
+
+###############################################################################
+# It is necessary to supply an "events" array in order to create an Epochs
+# object. This is of shape `(n_events, 3)`, where the first column is the
+# sample index of the event, the second column is the value of the stim
+# channel immediately before the event, and the third column is the event id.
+
+# Create an event matrix: 10 events with alternating event codes
+events = np.array([
+    [0, 1, 1],
+    [1, 1, 2],
+    [2, 1, 1],
+    [3, 1, 2],
+    [4, 1, 1],
+    [5, 1, 2],
+    [6, 1, 1],
+    [7, 1, 2],
+    [8, 1, 1],
+    [9, 1, 2],
+])
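
The same matrix can also be built programmatically; a small sketch:

    events = np.column_stack((np.arange(10),        # onsets, in samples
                              np.ones(10, int),     # stim value before onset
                              np.tile([1, 2], 5)))  # alternating event codes
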
+
+###############################################################################
+# More information about the event codes: subject was either smiling or
+# frowning
+event_id = dict(smiling=1, frowning=2)
+
+###############################################################################
+# Finally, we must specify the beginning of an epoch (the end will be inferred
+# from the sampling frequency and n_samples)
+
+# Trials were cut from -0.1 to 1.0 seconds
+tmin = -0.1
+
+###############################################################################
+# Now we can create the :class:`mne.EpochsArray` object
+custom_epochs = mne.EpochsArray(data, info, events, tmin, event_id)
+
+print(custom_epochs)
+
+# We can treat the epochs object as we would any other
+_ = custom_epochs['smiling'].average().plot()
+
+###############################################################################
+# ---------------------------------------------
+# Creating :class:`Evoked <mne.Evoked>` Objects
+# ---------------------------------------------
+# If you already have data that is collapsed across trials, you may also
+# directly create an evoked array. Its constructor accepts an array of shape
+# `(n_chans, n_times)` in addition to some bookkeeping parameters.
+
+# The averaged data
+data_evoked = data.mean(0)
+
+# The number of epochs that were averaged
+nave = data.shape[0]
+
+# A comment to describe the evoked (usually the condition name)
+comment = "Smiley faces"
+
+# Create the Evoked object
+evoked_array = mne.EvokedArray(data_evoked, info, tmin,
+                               comment=comment, nave=nave)
+print(evoked_array)
+_ = evoked_array.plot()
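+
+###############################################################################
+# As a cross-check (illustrative only): averaging the EpochsArray from above
+# gives the same result, since no epochs were rejected.
+evoked_from_epochs = custom_epochs.average()
+print(evoked_from_epochs.nave == evoked_array.nave)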
diff --git a/tutorials/plot_epochs_objects.py b/tutorials/plot_epochs_objects.py
new file mode 100644
index 0000000..279300d
--- /dev/null
+++ b/tutorials/plot_epochs_objects.py
@@ -0,0 +1,103 @@
+"""
+.. _tut_epochs_objects:
+
+The :class:`Epochs <mne.Epochs>` data structure: epoched data
+=============================================================
+"""
+
+from __future__ import print_function
+
+import mne
+import os.path as op
+import numpy as np
+from matplotlib import pyplot as plt
+
+###############################################################################
+# :class:`Epochs <mne.Epochs>` objects are a way of representing continuous
+# data as a collection of time-locked trials, stored in an array of shape
+# `(n_events, n_channels, n_times)`. They are useful for many statistical
+# methods in neuroscience, and make it easy to get a quick overview of what
+# happens during a trial.
+#
+# :class:`Epochs <mne.Epochs>` objects can be created in three ways:
+#  1. From a :class:`Raw <mne.io.RawFIF>` object, along with event times
+#  2. From an :class:`Epochs <mne.Epochs>` object that has been saved as a
+#     `.fif` file
+#  3. From scratch using :class:`EpochsArray <mne.EpochsArray>`
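+#
+# (For option 2, a saved Epochs object can be read back with, e.g.,
+# ``mne.read_epochs('sample-epo.fif')``; the filename here is purely
+# illustrative.)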
+
+# Load a dataset that contains events
+raw = mne.io.RawFIF(
+    op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
+            'sample_audvis_raw.fif'))
+
+# If your raw object has a stim channel, you can construct an event array
+# easily
+events = mne.find_events(raw, stim_channel='STI 014')
+
+# Show the number of events (number of rows)
+print('Number of events:', len(events))
+
+# Show all unique event codes (3rd column)
+print('Unique event codes:', np.unique(events[:, 2]))
+
+# Specify event codes of interest with descriptive labels
+event_id = dict(left=1, right=2)
+
+###############################################################################
+# Now, we can create an :class:`mne.Epochs` object with the events we've
+# extracted. Note that epochs constructed in this manner will not have their
+# data available until explicitly read into memory, which you can do with
+# :func:`get_data <mne.Epochs.get_data>`. Alternatively, you can use
+# `preload=True`.
+#
+# Note that there are many options available when loading an
+# :class:`mne.Epochs` object.  For more detailed information, see (**LINK TO
+# EPOCHS LOADING TUTORIAL**)
+
+# Expose the raw data as epochs, cut from -0.1 s to 1.0 s relative to the event
+# onsets
+epochs = mne.Epochs(raw, events, event_id, tmin=-0.1, tmax=1,
+                    baseline=(None, 0), preload=True)
+print(epochs)
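+
+# Had we used preload=False above, the data could still be read on demand:
+# epochs_data = epochs.get_data()  # shape (n_epochs, n_channels, n_times)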
+
+###############################################################################
+# Epochs behave similarly to :class:`mne.io.Raw` objects. They have an
+# :class:`info <mne.io.meas_info.Info>` attribute that has all of the same
+# information, as well as a number of attributes unique to the events contained
+# within the object.
+
+print(epochs.events[:3], epochs.event_id, sep='\n\n')
+
+###############################################################################
+# You can select subsets of epochs by indexing the :class:`Epochs <mne.Epochs>`
+# object directly. Alternatively, if you have epoch names specified in
+# `event_id` then you may index with strings instead.
+
+print(epochs[1:5])
+print(epochs['right'])
+
+###############################################################################
+# It is also possible to iterate through :class:`Epochs <mne.Epochs>` objects.
+# Note that the behavior differs depending on whether you iterate over the
+# `Epochs` object directly or index into it first:
+
+# These will be epochs objects
+for i in range(3):
+    print(epochs[i])
+
+# These will be arrays
+for ep in epochs[:2]:
+    print(ep)
+
+###############################################################################
+# If you wish to look at the average across trial types, then you may do so,
+# creating an `Evoked` object in the process.
+
+ev_left = epochs['left'].average()
+ev_right = epochs['right'].average()
+
+f, axs = plt.subplots(3, 2, figsize=(10, 5))
+_ = f.suptitle('Left / Right', fontsize=20)
+_ = ev_left.plot(axes=axs[:, 0], show=False)
+_ = ev_right.plot(axes=axs[:, 1], show=False)
+plt.tight_layout()
diff --git a/examples/export/plot_epochs_as_data_frame.py b/tutorials/plot_epochs_to_data_frame.py
similarity index 98%
rename from examples/export/plot_epochs_as_data_frame.py
rename to tutorials/plot_epochs_to_data_frame.py
index 7ad45e9..54d796d 100644
--- a/examples/export/plot_epochs_as_data_frame.py
+++ b/tutorials/plot_epochs_to_data_frame.py
@@ -1,4 +1,6 @@
 """
+.. _tut_io_export_pandas:
+
 =================================
 Export epochs to Pandas DataFrame
 =================================
@@ -89,14 +91,13 @@ pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
 #
 # License: BSD (3-clause)
 
-print(__doc__)
-
 import mne
 import matplotlib.pyplot as plt
 import numpy as np
 from mne.io import Raw
 from mne.datasets import sample
 
+print(__doc__)
 
 # turn on interactive mode
 plt.ion()
@@ -136,7 +137,7 @@ epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
 
 index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
 
-df = epochs.as_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
+df = epochs.to_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
                           index=index)
 
 # Create MEG channel selector and drop EOG channel.
diff --git a/examples/preprocessing/plot_ica_from_raw.py b/tutorials/plot_ica_from_raw.py
similarity index 91%
rename from examples/preprocessing/plot_ica_from_raw.py
rename to tutorials/plot_ica_from_raw.py
index 6657c9b..aa0a658 100644
--- a/examples/preprocessing/plot_ica_from_raw.py
+++ b/tutorials/plot_ica_from_raw.py
@@ -1,20 +1,20 @@
 """
-==================================
-Compute ICA components on raw data
-==================================
+.. _tut_preprocessing_ica:
+
+Compute ICA on MEG data and remove artifacts
+============================================
 
 ICA is fit to MEG raw data.
 The sources matching the ECG and EOG are automatically found and displayed.
 Subsequently, artifact detection and rejection quality are assessed.
 """
-print(__doc__)
-
 # Authors: Denis Engemann <denis.engemann at gmail.com>
 #          Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
 #
 # License: BSD (3-clause)
 
 import numpy as np
+
 import mne
 from mne.io import Raw
 from mne.preprocessing import ICA
@@ -74,7 +74,7 @@ ica.plot_scores(scores, exclude=eog_inds, title=title % 'eog')
 
 show_picks = np.abs(scores).argsort()[::-1][:5]
 
-ica.plot_sources(raw, show_picks, exclude=ecg_inds, title=title % 'eog')
+ica.plot_sources(raw, show_picks, exclude=eog_inds, title=title % 'eog')
 ica.plot_components(eog_inds, title=title % 'eog', colorbar=True)
 
 eog_inds = eog_inds[:n_max_eog]
@@ -96,12 +96,13 @@ ica.plot_overlay(eog_evoked, exclude=eog_inds)  # plot EOG cleaning
 ica.plot_overlay(raw)  # EOG artifacts remain
 
 ###############################################################################
+
 # To save an ICA solution you can say:
-# >>> ica.save('my_ica.fif')
-#
+# ica.save('my_ica.fif')
+
 # You can later load the solution by saying:
-# >>> from mne.preprocessing import read_ica
-# >>> read_ica('my_ica.fif')
-#
+# from mne.preprocessing import read_ica
+# read_ica('my_ica.fif')
+
 # Apply the solution to Raw, Epochs or Evoked like this:
-# >>> ica.apply(epochs, copy=False)
+# ica.apply(epochs, copy=False)
diff --git a/tutorials/plot_info.py b/tutorials/plot_info.py
new file mode 100644
index 0000000..6fe6f93
--- /dev/null
+++ b/tutorials/plot_info.py
@@ -0,0 +1,90 @@
+"""
+.. _tut_info_objects:
+
+The :class:`Info <mne.io.meas_info.Info>` data structure
+========================================================
+"""
+
+from __future__ import print_function
+
+import mne
+import os.path as op
+
+###############################################################################
+# The :class:`Info <mne.io.meas_info.Info>` data object is typically created
+# when data is imported into MNE-Python and contains details such as:
+#
+#  - date, subject information, and other recording details
+#  - the sampling rate
+#  - information about the data channels (name, type, position, etc.)
+#  - digitized points
+#  - sensor–head coordinate transformation matrices
+#
+# and so forth. See :class:`the API reference <mne.io.meas_info.Info>`
+# for a complete list of all data fields. Once created, this object is passed
+# around throughout the data analysis pipeline.
+#
+# It behaves as a nested Python dictionary:
+
+# Read the info object from an example recording
+info = mne.io.read_info(
+    op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
+            'sample_audvis_raw.fif'), verbose=False)
+
+# List all the fields in the info object
+print('Keys in info dictionary:\n', info.keys())
+
+# Obtain the sampling rate of the data
+print(info['sfreq'], 'Hz')
+
+# List all information about the first data channel
+print(info['chs'][0])
+
+###############################################################################
+# Obtaining subsets of channels
+# -----------------------------
+#
+# There are a number of convenience functions to obtain channel indices, given
+# an :class:`mne.io.meas_info.Info` object.
+
+# Get channel indices by name
+channel_indices = mne.pick_channels(info['ch_names'], ['MEG 0312', 'EEG 005'])
+
+# Get channel indices by regular expression
+channel_indices = mne.pick_channels_regexp(info['ch_names'], 'MEG *')
+
+# Get channel indices by type
+channel_indices = mne.pick_types(info, meg=True)  # MEG only
+channel_indices = mne.pick_types(info, eeg=True)  # EEG only
+# MEG gradiometers and EEG channels
+channel_indices = mne.pick_types(info, meg='grad', eeg=True)
+
+# Get a dictionary of channel indices, grouped by channel type
+channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
+print('The first three magnetometers:', channel_indices_by_type['mag'][:3])
+
+###############################################################################
+# Obtaining information about channels
+# ------------------------------------
+
+# Channel type of a specific channel
+channel_type = mne.io.pick.channel_type(info, 75)
+print('Channel #75 is of type:', channel_type)
+
+# Channel types of a collection of channels
+meg_channels = mne.pick_types(info, meg=True)[:10]
+channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
+print('First 10 MEG channels are of type:\n', channel_types)
+
+###############################################################################
+# Dropping channels from an info structure
+# ----------------------------------------
+#
+# It is possible to limit the info structure to only include a subset of
+# channels with the :func:`mne.pick_info` function:
+
+# Only keep EEG channels
+eeg_indices = mne.pick_types(info, meg=False, eeg=True)
+reduced_info = mne.pick_info(info, eeg_indices)
+
+print(reduced_info)
diff --git a/tutorials/plot_introduction.py b/tutorials/plot_introduction.py
new file mode 100644
index 0000000..9f539a0
--- /dev/null
+++ b/tutorials/plot_introduction.py
@@ -0,0 +1,373 @@
+"""
+.. _intro_tutorial:
+
+Basic MEG and EEG data processing
+=================================
+
+MNE-Python reimplements most of the functionality of MNE-C (the original MNE
+command-line utilities) and offers transparent scripting. On top of that it
+extends MNE-C considerably (customized events, contrasts, group statistics,
+time-frequency analysis, EEG sensor-space analyses, etc.). It uses the same
+files as the standard MNE Unix commands: there is no need to convert your
+files to a new system or database.
+
+What you can do with MNE Python
+-------------------------------
+
+   - **Raw data visualization** to visualize recordings; you can also use
+     *mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
+   - **Epoching**: Define epochs, baseline correction, handle conditions etc.
+   - **Averaging** to get Evoked data
+   - **Compute SSP projectors** to remove ECG and EOG artifacts
+   - **Compute ICA** to remove artifacts or select latent sources.
+   - **Boundary Element Modeling**: single and three-layer BEM model
+     creation and solution computation.
+   - **Forward modeling**: BEM computation and mesh creation
+     (see :ref:`ch_forward`)
+   - **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
+   - **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map,
+     Time-Frequency MxNE)
+   - **Connectivity estimation** in sensor and source space
+   - **Visualization of sensor and source space data**
+   - **Time-frequency** analysis with Morlet wavelets (induced power,
+     intertrial coherence, phase locking value), also in the source space
+   - **Spectrum estimation** using the multi-taper method
+   - **Mixed Source Models** combining cortical and subcortical structures
+   - **Dipole Fitting**
+   - **Decoding**: multivariate pattern analysis of M/EEG topographies
+   - **Compute contrasts** between conditions, between sensors, across
+     subjects etc.
+   - **Non-parametric statistics** in time, space and frequency
+     (including cluster-level)
+   - **Scripting** (batch and parallel computing)
+
+What you're not supposed to do with MNE Python
+----------------------------------------------
+
+    - **Brain and head surface segmentation** for use with BEM models -- use Freesurfer.
+
+
+.. note:: The package is based on the FIF file format from Neuromag. It can
+          read and convert CTF, BTI/4D, KIT and various EEG formats to FIF.
+
+
+Installation of the required materials
+---------------------------------------
+
+See :ref:`getting_started` with Python.
+
+.. note:: The expected location for the MNE-sample data is
+    my-path-to/mne-python/examples. If you downloaded data and an example
+    asks you whether to download it again, make sure the data reside in the
+    examples directory and that you run the script from its directory.
+
+    From IPython e.g. say::
+
+        cd examples/preprocessing
+        %run plot_find_ecg_artifacts.py
+
+From raw data to evoked data
+----------------------------
+
+.. _ipython: http://ipython.scipy.org/
+
+Now, launch `ipython`_ (the advanced Python shell) using the Qt backend,
+which is best supported across systems::
+
+  $ ipython --pylab=qt
+
+First, load the mne package:
+"""
+
+import mne
+
+##############################################################################
+# If you'd like to turn information status messages off:
+
+mne.set_log_level('WARNING')
+
+##############################################################################
+# But it's generally a good idea to leave them on:
+
+mne.set_log_level('INFO')
+
+##############################################################################
+# You can set the default level by setting the environment variable
+# "MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
+
+mne.set_config('MNE_LOGGING_LEVEL', 'WARNING')
+
+##############################################################################
+# Note that the location of the mne-python preferences file (for easier manual
+# editing) can be found using:
+
+print(mne.get_config_path())
+
+##############################################################################
+# By default logging messages print to the console, but look at
+# mne.set_log_file() to save output to a file.
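+#
+# For example (a sketch; the filename is arbitrary)::
+#
+#     mne.set_log_file('mne_output.log', overwrite=True)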
+#
+# Access raw data
+# ^^^^^^^^^^^^^^^
+
+from mne.datasets import sample
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+print(raw_fname)
+
+##############################################################################
+# .. note:: The MNE sample dataset should be downloaded automatically but be
+#           patient (approx. 2GB)
+#
+# Read data from file:
+
+raw = mne.io.Raw(raw_fname)
+print(raw)
+print(raw.info)
+
+##############################################################################
+# Look at the channels in raw:
+
+print(raw.ch_names)
+
+##############################################################################
+# Read and plot a segment of raw data
+
+start, stop = raw.time_as_index([100, 115])  # 100 s to 115 s data segment
+data, times = raw[:, start:stop]
+print(data.shape)
+print(times.shape)
+data, times = raw[2:20:3, start:stop]  # every 3rd channel from 2 to 20
+raw.plot()
+
+##############################################################################
+# Save a segment of 150 s of raw data (MEG and stim channels only):
+
+picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
+                       exclude='bads')
+raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
+         overwrite=True)
+
+##############################################################################
+# Define and read epochs
+# ^^^^^^^^^^^^^^^^^^^^^^
+#
+# First extract events:
+
+events = mne.find_events(raw, stim_channel='STI 014')
+print(events[:5])
+
+##############################################################################
+# Note that, by default, we use stim_channel='STI 014'. If you have a different
+# system (e.g., a newer system that uses channel 'STI101' by default), you can
+# use the following to set the default stim channel to use for finding events:
+
+mne.set_config('MNE_STIM_CHANNEL', 'STI101')
+
+##############################################################################
+# Events are stored as a 2D numpy array where the first column is the sample
+# index of the event and the last one is the event code. It is therefore
+# easy to manipulate, as the short sketch below shows.
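+
+# For example (a minimal sketch), keep only events whose code is 1
+# (the left-auditory stimuli in this dataset):
+events_aud_l = events[events[:, 2] == 1]
+print('Number of left-auditory events:', len(events_aud_l))
+
+##############################################################################
+#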
+# Define epochs parameters:
+
+event_id = dict(aud_l=1, aud_r=2)  # event trigger and conditions
+tmin = -0.2  # start of each epoch (200ms before the trigger)
+tmax = 0.5  # end of each epoch (500ms after the trigger)
+
+##############################################################################
+# Exclude some channels (original bads + 2 more):
+
+raw.info['bads'] += ['MEG 2443', 'EEG 053']
+
+##############################################################################
+# The variable raw.info['bads'] is just a python list.
+#
+# Pick the good channels, excluding raw.info['bads']:
+
+picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False,
+                       exclude='bads')
+
+##############################################################################
+# Alternatively one can restrict to magnetometers or gradiometers with:
+
+mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
+grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
+
+##############################################################################
+# Define the baseline period:
+
+baseline = (None, 0)  # means from the first instant to t = 0
+
+##############################################################################
+# Define peak-to-peak rejection parameters for gradiometers, magnetometers and EOG:
+
+reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+
+##############################################################################
+# Read epochs:
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
+                    baseline=baseline, preload=False, reject=reject)
+print(epochs)
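+
+# Epochs exceeding the rejection thresholds are dropped when their data are
+# loaded; you can force this now and inspect why epochs were dropped
+# (a sketch):
+# epochs.drop_bad_epochs()
+# print(epochs.drop_log[:5])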
+
+##############################################################################
+# Get single epochs for one condition:
+
+epochs_data = epochs['aud_l'].get_data()
+print(epochs_data.shape)
+
+##############################################################################
+# epochs_data is a 3D array of shape (55 epochs, 365 channels, 106 time
+# points).
+#
+# SciPy supports reading and writing MATLAB files. You can save your single
+# trials with:
+
+from scipy import io
+io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
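+
+# The file can be read back with scipy as well, e.g.:
+# epochs_data = io.loadmat('epochs_data.mat')['epochs_data']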
+
+##############################################################################
+# or if you want to keep all the information about the data you can save your
+# epochs in a fif file:
+
+epochs.save('sample-epo.fif')
+
+##############################################################################
+# and read them later with:
+
+saved_epochs = mne.read_epochs('sample-epo.fif')
+
+##############################################################################
+# Compute the evoked response for the left-auditory condition by averaging
+# and plot it:
+
+evoked = epochs['aud_l'].average()
+print(evoked)
+evoked.plot()
+
+##############################################################################
+# .. topic:: Exercise
+#
+#   1. Extract the max value of each epoch
+
+max_in_each_epoch = [e.max() for e in epochs['aud_l']]  # doctest:+ELLIPSIS
+print(max_in_each_epoch[:4])  # doctest:+ELLIPSIS
+
+##############################################################################
+# It is also possible to read evoked data stored in a fif file:
+
+evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
+evoked1 = mne.read_evokeds(
+    evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True)
+
+##############################################################################
+# Or another one stored in the same file:
+
+evoked2 = mne.read_evokeds(
+    evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True)
+
+##############################################################################
+# Compute a contrast:
+
+contrast = evoked1 - evoked2
+print(contrast)
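+
+# The result is again an Evoked object, with its own nave bookkeeping:
+print('Number of averages in contrast:', contrast.nave)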
+
+##############################################################################
+# Time-Frequency: Induced power and inter-trial coherence
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# Define parameters:
+
+import numpy as np
+n_cycles = 2  # number of cycles in Morlet wavelet
+freqs = np.arange(7, 30, 3)  # frequencies of interest
+
+##############################################################################
+# Compute induced power and phase-locking values and plot gradiometers:
+
+from mne.time_frequency import tfr_morlet
+power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
+                        return_itc=True, decim=3, n_jobs=1)
+# power.plot()
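+# e.g., something like power.plot([0], baseline=(-0.2, 0), mode='logratio')
+# would show baseline-normalized power for the first channel (a sketch;
+# check the AverageTFR.plot documentation for the exact arguments)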
+
+##############################################################################
+# Inverse modeling: MNE and dSPM on evoked and raw data
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# Import the required functions:
+
+from mne.minimum_norm import apply_inverse, read_inverse_operator
+
+##############################################################################
+# Read the inverse operator:
+
+fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
+inverse_operator = read_inverse_operator(fname_inv)
+
+##############################################################################
+# Define the inverse parameters:
+
+snr = 3.0
+lambda2 = 1.0 / snr ** 2
+method = "dSPM"
+
+##############################################################################
+# Compute the inverse solution:
+
+stc = apply_inverse(evoked, inverse_operator, lambda2, method)
+
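+# The result is a SourceEstimate object; its data array has shape
+# (n_sources, n_times):
+print(stc)
+print('stc data shape:', stc.data.shape)
+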
+##############################################################################
+# Save the source time courses to disk:
+
+stc.save('mne_dSPM_inverse')
+
+##############################################################################
+# Now, let's compute dSPM on a raw file within a label:
+
+fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
+label = mne.read_label(fname_label)
+
+##############################################################################
+# Compute inverse solution during the first 15s:
+
+from mne.minimum_norm import apply_inverse_raw
+start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
+stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
+                        start, stop)
+
+##############################################################################
+# Save result in stc files:
+
+stc.save('mne_dSPM_raw_inverse_Aud')
+
+##############################################################################
+# What else can you do?
+# ^^^^^^^^^^^^^^^^^^^^^
+#
+#     - detect heart beat QRS component
+#     - detect eye blinks and EOG artifacts
+#     - compute SSP projections to remove ECG or EOG artifacts
+#     - compute Independent Component Analysis (ICA) to remove artifacts or
+#       select latent sources
+#     - estimate noise covariance matrix from Raw and Epochs
+#     - visualize cross-trial response dynamics using epochs images
+#     - compute forward solutions
+#     - estimate power in the source space
+#     - estimate connectivity in sensor and source space
+#     - morph stc from one brain to another for group studies
+#     - compute mass univariate statistics based on custom contrasts
+#     - visualize source estimates
+#     - export raw, epochs, and evoked data to other Python data analysis
+#       libraries, e.g. pandas
+#     - and many more things ...
+#
+# Want to know more?
+# ^^^^^^^^^^^^^^^^^^^
+#
+# Browse the :ref:`examples-index` gallery.
+
+print("Done!")
diff --git a/tutorials/plot_modifying_data_inplace.py b/tutorials/plot_modifying_data_inplace.py
new file mode 100644
index 0000000..a556ebf
--- /dev/null
+++ b/tutorials/plot_modifying_data_inplace.py
@@ -0,0 +1,74 @@
+"""
+.. _tut_modifying_data_inplace:
+
+Modifying data in-place
+=======================
+"""
+
+from __future__ import print_function
+
+import mne
+import os.path as op
+import numpy as np
+from matplotlib import pyplot as plt
+
+###############################################################################
+# It is often necessary to modify data once you have loaded it into memory.
+# Common examples of this are signal processing, feature extraction, and data
+# cleaning. Some functionality is pre-built into MNE-python, though it is also
+# possible to apply an arbitrary function to the data.
+
+# Load an example dataset, the preload flag loads the data into memory now
+data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
+                    'sample', 'sample_audvis_raw.fif')
+raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
+raw = raw.crop(0, 2)
+print(raw)
+
+###############################################################################
+# Signal processing
+# -----------------
+#
+# Most MNE objects have built-in methods for filtering:
+
+filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)]
+f, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10))
+_ = ax.plot(raw._data[0])
+for fband in filt_bands:
+    raw_filt = raw.copy()
+    raw_filt.filter(*fband)
+    _ = ax2.plot(raw_filt._data[0])
+ax2.legend(filt_bands)
+ax.set_title('Raw data')
+ax2.set_title('Band-pass filtered data')
+
+###############################################################################
+# In addition, there are methods for applying the Hilbert transform, which
+# is useful for calculating the phase and amplitude of a signal
+
+# Filter the signal, then take the Hilbert transform
+raw_band = raw.copy()
+raw_band.filter(12, 18)
+raw_hilb = raw_band.copy()
+hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True)
+raw_hilb.apply_hilbert(hilb_picks)
+print(raw_hilb._data.dtype)
+
+###############################################################################
+# Finally, it is possible to apply an arbitrary function to your data to do
+# whatever you want. Here we will use this to take the amplitude and phase of
+# the Hilbert-transformed data. (Note that passing `envelope=True` to
+# :func:`mne.io.Raw.apply_hilbert` computes the amplitude envelope directly.)
+
+# Take the amplitude and phase
+raw_amp = raw_hilb.copy()
+raw_amp.apply_function(np.abs, hilb_picks, float, 1)
+raw_phase = raw_hilb.copy()
+raw_phase.apply_function(np.angle, hilb_picks, float, 1)
+
+f, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10))
+a1.plot(raw_band._data[hilb_picks[0]])
+a1.plot(raw_amp._data[hilb_picks[0]])
+a2.plot(raw_phase._data[hilb_picks[0]])
+a1.set_title('Amplitude of frequency band')
+a2.set_title('Phase of frequency band')
diff --git a/tutorials/plot_raw_objects.py b/tutorials/plot_raw_objects.py
new file mode 100644
index 0000000..0a2284f
--- /dev/null
+++ b/tutorials/plot_raw_objects.py
@@ -0,0 +1,133 @@
+"""
+.. _tut_raw_objects:
+
+The :class:`Raw <mne.io.RawFIF>` data structure: continuous data
+================================================================
+"""
+
+from __future__ import print_function
+
+import mne
+import os.path as op
+from matplotlib import pyplot as plt
+
+###############################################################################
+# Continuous data is stored in objects of type :class:`Raw <mne.io.RawFIF>`.
+# The core data structure is simply a 2D numpy array (channels × samples,
+# `._data`) combined with an :class:`Info <mne.io.meas_info.Info>` object
+# (`.info`) (see :ref:`tut_info_objects`).
+#
+# The most common way to load continuous data is from a .fif file. For more
+# information, see :ref:`loading data from other formats <ch_raw>` and
+# :ref:`creating Raw objects from scratch <tut_creating_data_structures>`.
+
+
+###############################################################################
+# Loading continuous data
+# -----------------------
+
+# Load an example dataset, the preload flag loads the data into memory now
+data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
+                    'sample', 'sample_audvis_raw.fif')
+raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
+
+# Give the sample rate
+print('sample rate:', raw.info['sfreq'], 'Hz')
+# Give the size of the data matrix
+print('channels x samples:', raw._data.shape)
+
+###############################################################################
+# Information about the channels contained in the :class:`Raw <mne.io.RawFIF>`
+# object is stored in its :class:`Info <mne.io.meas_info.Info>` attribute.
+# This is essentially a dictionary with a number of relevant fields (see
+# :ref:`tut_info_objects`).
+
+
+###############################################################################
+# Indexing data
+# -------------
+#
+# There are two ways to access the data stored within :class:`Raw
+# <mne.io.RawFIF>` objects. One is by accessing the underlying data array, and
+# the other is to index the :class:`Raw <mne.io.RawFIF>` object directly.
+#
+# To access the data array of :class:`Raw <mne.io.Raw>` objects, use the
+# `_data` attribute. Note that this is only present if `preload=True` was
+# passed.
+
+print('Shape of data array:', raw._data.shape)
+array_data = raw._data[0, :1000]
+_ = plt.plot(array_data)
+
+###############################################################################
+# You can also pass an index directly to the :class:`Raw <mne.io.RawFIF>`
+# object. This will return an array of times, as well as the data representing
+# those timepoints. This may be used even if the data is not preloaded:
+
+# Extract data from the first 5 channels, from 1 s to 3 s.
+sfreq = raw.info['sfreq']
+data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
+_ = plt.plot(times, data.T)
+_ = plt.title('Sample channels')
+
+###############################################################################
+# -----------------------------------------
+# Selecting subsets of channels and samples
+# -----------------------------------------
+#
+# It is possible to use more intelligent indexing to extract data, using
+# channel names, types or time ranges.
+
+# Pull all MEG gradiometer channels:
+# Make sure to use copy=True, otherwise raw itself is modified in place
+meg_only = raw.pick_types(meg=True, copy=True)
+eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)
+
+# The MEG flag in particular lets you specify a string for more specificity
+grad_only = raw.pick_types(meg='grad', copy=True)
+
+# Or you can use custom channel names
+pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
+specific_chans = raw.pick_channels(pick_chans, copy=True)
+print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
+
+###############################################################################
+# Notice the different scalings of these types
+
+f, (a1, a2) = plt.subplots(2, 1)
+eeg, times = eeg_only[0, :int(sfreq * 2)]
+meg, times = meg_only[0, :int(sfreq * 2)]
+a1.plot(times, meg[0])
+a2.plot(times, eeg[0])
+
+###############################################################################
+# You can restrict the data to a specific time range
+
+restricted = raw.copy().crop(5, 7)  # in seconds; copy() keeps raw intact
+print('New time range from', restricted.times.min(), 's to',
+      restricted.times.max(), 's')
+
+###############################################################################
+# And drop channels by name
+
+restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
+print('Number of channels reduced from', raw.info['nchan'], 'to',
+      restricted.info['nchan'])
+
+###############################################################################
+# --------------------------------------------------
+# Concatenating :class:`Raw <mne.io.RawFIF>` objects
+# --------------------------------------------------
+#
+# :class:`Raw <mne.io.RawFIF>` objects can be concatenated in time by using the
+# :func:`append <mne.io.RawFIF.append>` function. For this to work, they must
+# have the same number of channels and their :class:`Info
+# <mne.io.meas_info.Info>` structures should be compatible.
+
+# Create multiple :class:`Raw <mne.io.RawFIF>` objects
+raw1 = raw.copy().crop(0, 10)
+raw2 = raw.copy().crop(10, 20)
+raw3 = raw.copy().crop(20, 100)
+
+# Concatenate in time (also works without preloading)
+raw1.append([raw2, raw3])
+print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
diff --git a/tutorials/plot_source_localization_basics.py b/tutorials/plot_source_localization_basics.py
new file mode 100644
index 0000000..8340ba6
--- /dev/null
+++ b/tutorials/plot_source_localization_basics.py
@@ -0,0 +1,98 @@
+"""
+.. _tut_inverse_basics:
+
+Basics of source localization
+=============================
+
+Authors: Alexandre Gramfort <alexandre.gramfort at telecom-paristech.fr>
+         Denis Engemann <denis.engemann at gmail.com>
+
+"""
+import numpy as np
+import mne
+from mne.datasets import sample
+from mne.minimum_norm import (make_inverse_operator, apply_inverse,
+                              write_inverse_operator)
+
+mne.set_log_level('WARNING')
+
+##############################################################################
+# Process MEG data
+
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+
+raw = mne.io.Raw(raw_fname)
+events = mne.find_events(raw, stim_channel='STI 014')
+
+event_id = dict(aud_r=1)  # event trigger and conditions
+tmin = -0.2  # start of each epoch (200ms before the trigger)
+tmax = 0.5  # end of each epoch (500ms after the trigger)
+raw.info['bads'] = ['MEG 2443', 'EEG 053']
+picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
+                       exclude='bads')
+baseline = (None, 0)  # means from the first instant to t = 0
+reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
+
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
+                    picks=picks, baseline=baseline, reject=reject)
+
+# compute regularized noise covariance
+
+noise_cov = mne.compute_covariance(
+    epochs, tmax=0., method=['shrunk', 'empirical'])
+
+fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
+
+##############################################################################
+# Compute the evoked response
+
+evoked = epochs.average()
+evoked.plot()
+evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
+
+
+##############################################################################
+# Inverse modeling: MNE and dSPM on evoked and raw data
+
+# Read the forward solution and compute the inverse operator
+
+fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
+fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
+
+# Restrict forward solution as necessary for MEG
+fwd = mne.pick_types_forward(fwd, meg=True, eeg=False)
+
+# Make an inverse operator (MEG-only here, since the forward solution was
+# restricted to MEG above)
+info = evoked.info
+inverse_operator = make_inverse_operator(info, fwd, noise_cov,
+                                         loose=0.2, depth=0.8)
+
+write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
+                       inverse_operator)
+
+# Compute inverse solution
+
+method = "dSPM"
+snr = 3.
+lambda2 = 1. / snr ** 2
+stc = apply_inverse(evoked, inverse_operator, lambda2,
+                    method=method, pick_ori=None)
+
+
+# visualize
+
+subjects_dir = data_path + '/subjects'
+brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
+brain.set_data_time_index(45)
+brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
+brain.show_view('lateral')
+
+# morph data to average brain
+stc_fsaverage = stc.morph(subject_to='fsaverage', subjects_dir=subjects_dir)
+
+brain_fsaverage = stc_fsaverage.plot(surface='inflated', hemi='rh',
+                                     subjects_dir=subjects_dir)
+brain_fsaverage.set_data_time_index(45)
+brain_fsaverage.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
+brain_fsaverage.show_view('lateral')
diff --git a/tutorials/plot_spatio_temporal_cluster_stats_sensor.py b/tutorials/plot_spatio_temporal_cluster_stats_sensor.py
new file mode 100644
index 0000000..c43b514
--- /dev/null
+++ b/tutorials/plot_spatio_temporal_cluster_stats_sensor.py
@@ -0,0 +1,193 @@
+"""
+.. _stats_cluster_sensors_2samp_spatial:
+
+=====================================================
+Spatiotemporal permutation F-test on full sensor data
+=====================================================
+
+Tests for differential evoked responses in at least
+one condition using a permutation clustering test.
+The FieldTrip neighbor templates will be used to determine
+the adjacency between sensors. This serves as a spatial prior
+to the clustering. Significant spatiotemporal clusters will then
+be visualized using custom matplotlib code.
+"""
+# Authors: Denis Engemann <denis.engemann at gmail.com>
+#
+# License: BSD (3-clause)
+
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from mne.viz import plot_topomap
+
+import mne
+from mne.stats import spatio_temporal_cluster_test
+from mne.datasets import sample
+from mne.channels import read_ch_connectivity
+
+print(__doc__)
+
+###############################################################################
+
+# Set parameters
+data_path = sample.data_path()
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
+tmin = -0.2
+tmax = 0.5
+
+# Setup for reading the raw data
+raw = mne.io.Raw(raw_fname, preload=True)
+raw.filter(1, 30)
+events = mne.read_events(event_fname)
+
+###############################################################################
+# Read epochs for the channels of interest
+
+picks = mne.pick_types(raw.info, meg='mag', eog=True)
+
+reject = dict(mag=4e-12, eog=150e-6)
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
+                    baseline=None, reject=reject, preload=True)
+
+epochs.drop_channels(['EOG 061'])
+epochs.equalize_event_counts(event_id, copy=False)
+
+condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
+X = [epochs[k].get_data() for k in condition_names]  # as 3D matrix
+X = [np.transpose(x, (0, 2, 1)) for x in X]  # transpose for clustering
+
+
+###############################################################################
+# load FieldTrip neighbor definition to set up sensor connectivity
+connectivity, ch_names = read_ch_connectivity('neuromag306mag')
+
+print(type(connectivity))  # it's a sparse matrix!
+
+plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
+           interpolation='nearest')
+plt.xlabel('{} Magnetometers'.format(len(ch_names)))
+plt.ylabel('{} Magnetometers'.format(len(ch_names)))
+plt.title('Between-sensor adjacency')
+
+###############################################################################
+# Compute permutation statistic
+#
+# How does it work? We use clustering to "bind" together features which are
+# similar. Our features are the magnetic fields measured over our sensor
+# array at different times. This reduces the multiple comparisons problem.
+# To compute the actual test statistic, we sum the F-values within each
+# cluster, which leaves us with one statistic per cluster.
+# Then we generate a distribution from the data by shuffling our conditions
+# between our samples and recomputing our clusters and the test statistics.
+# We test for the significance of a given cluster by computing the probability
+# of observing a cluster of that size. For more background read:
+# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
+# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
+# doi:10.1016/j.jneumeth.2007.03.024
+
+
+# set cluster threshold
+threshold = 50.0  # very high, but the test is quite sensitive on this data
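+# Alternatively, a threshold could be derived from the F distribution for a
+# conventional alpha level (a sketch; the degrees of freedom follow from the
+# number of conditions and observations):
+# from scipy import stats
+# threshold = stats.f.ppf(1. - 0.001, dfn=len(X) - 1,
+#                         dfd=X[0].shape[0] - 1)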
+# set family-wise p-value
+p_accept = 0.001
+
+cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
+                                             threshold=threshold, tail=1,
+                                             n_jobs=2,
+                                             connectivity=connectivity)
+
+T_obs, clusters, p_values, _ = cluster_stats
+good_cluster_inds = np.where(p_values < p_accept)[0]
+
+# Note. The same function works with source estimates. The only differences
+# are the origin of the data, the size, and the connectivity definition.
+# It can be used for single trials or for groups of subjects.
+
+###############################################################################
+# Visualize clusters
+
+# configure variables for visualization
+times = epochs.times * 1e3
+colors = 'r', 'r', 'steelblue', 'steelblue'
+linestyles = '-', '--', '-', '--'
+
+# grand average as numpy array
+grand_ave = np.array(X).mean(axis=1)
+
+# get sensor positions via layout
+pos = mne.find_layout(epochs.info).pos
+
+# loop over significant clusters
+for i_clu, clu_idx in enumerate(good_cluster_inds):
+    # unpack cluster information, get unique indices
+    time_inds, space_inds = np.squeeze(clusters[clu_idx])
+    ch_inds = np.unique(space_inds)
+    time_inds = np.unique(time_inds)
+
+    # get topography for F stat
+    f_map = T_obs[time_inds, ...].mean(axis=0)
+
+    # get signals at significant sensors
+    signals = grand_ave[..., ch_inds].mean(axis=-1)
+    sig_times = times[time_inds]
+
+    # create spatial mask
+    mask = np.zeros((f_map.shape[0], 1), dtype=bool)
+    mask[ch_inds, :] = True
+
+    # initialize figure
+    fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
+    title = 'Cluster #{0}'.format(i_clu + 1)
+    fig.suptitle(title, fontsize=14)
+
+    # plot average test statistic and mark significant sensors
+    image, _ = plot_topomap(f_map, pos, mask=mask, axis=ax_topo,
+                            cmap='Reds', vmin=np.min, vmax=np.max)
+
+    # advanced matplotlib for showing image with figure and colorbar
+    # in one plot
+    divider = make_axes_locatable(ax_topo)
+
+    # add axes for colorbar
+    ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
+    plt.colorbar(image, cax=ax_colorbar)
+    ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
+        *sig_times[[0, -1]]
+    ))
+
+    # add new axis for time courses and plot time courses
+    ax_signals = divider.append_axes('right', size='300%', pad=1.2)
+    for signal, name, col, ls in zip(signals, condition_names, colors,
+                                     linestyles):
+        ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)
+
+    # add information
+    ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
+    ax_signals.set_xlim([times[0], times[-1]])
+    ax_signals.set_xlabel('time [ms]')
+    ax_signals.set_ylabel('evoked magnetic fields [fT]')
+
+    # plot significant time range
+    ymin, ymax = ax_signals.get_ylim()
+    ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
+                             color='orange', alpha=0.3)
+    ax_signals.legend(loc='lower right')
+    ax_signals.set_ylim(ymin, ymax)
+
+    # clean up viz
+    mne.viz.tight_layout(fig=fig)
+    fig.subplots_adjust(bottom=.05)
+    plt.show()
+
+"""
+Excercises
+----------
+
+- What is the smallest p-value you can obtain, given the finite number of
+   permutations?
+- use an F distribution to compute the threshold by tradition significance
+   levels. Hint: take a look at ```scipy.stats.distributions.f```
+"""

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-mne.git



More information about the debian-med-commit mailing list